From 9e8f167aadfe8e5cdf50f7230608878d7da9de11 Mon Sep 17 00:00:00 2001 From: Shwetha-Acharya Date: Tue, 21 Nov 2023 11:18:35 +0000 Subject: [PATCH] Deployed 58e71a4 with MkDocs version: 1.4.2 --- .nojekyll | 0 404.html | 4398 +++++++++++ .../Access-Control-Lists/index.html | 4832 ++++++++++++ .../Accessing-Gluster-from-Windows/index.html | 4638 ++++++++++++ .../Automatic-File-Replication/index.html | 4678 ++++++++++++ Administrator-Guide/Bareos/index.html | 4615 ++++++++++++ .../Brick-Naming-Conventions/index.html | 4527 +++++++++++ .../index.html | 4579 +++++++++++ Administrator-Guide/Consul/index.html | 4882 ++++++++++++ .../Directory-Quota/index.html | 4809 ++++++++++++ Administrator-Guide/Events-APIs/index.html | 6705 ++++++++++++++++ .../index.html | 4605 +++++++++++ .../Geo-Replication/index.html | 5312 +++++++++++++ Administrator-Guide/Gluster-On-ZFS/index.html | 4828 ++++++++++++ .../GlusterFS-Cinder/index.html | 4697 ++++++++++++ .../GlusterFS-Coreutils/index.html | 4756 ++++++++++++ .../GlusterFS-Filter/index.html | 4456 +++++++++++ .../GlusterFS-Introduction/index.html | 4515 +++++++++++ .../GlusterFS-Keystone-Quickstart/index.html | 4565 +++++++++++ .../GlusterFS-iSCSI/index.html | 4576 +++++++++++ .../index.html | 4613 ++++++++++++ Administrator-Guide/Hook-scripts/index.html | 4594 +++++++++++ .../Linux-Kernel-Tuning/index.html | 4924 ++++++++++++ Administrator-Guide/Logging/index.html | 4696 ++++++++++++ .../Managing-Snapshots/index.html | 4895 ++++++++++++ .../Managing-Volumes/index.html | 5389 +++++++++++++ .../Mandatory-Locks/index.html | 4560 +++++++++++ .../Monitoring-Workload/index.html | 5525 ++++++++++++++ .../index.html | 5251 +++++++++++++ .../index.html | 4676 ++++++++++++ Administrator-Guide/Object-Storage/index.html | 4523 +++++++++++ .../Performance-Testing/index.html | 4807 ++++++++++++ .../Performance-Tuning/index.html | 4620 ++++++++++++ Administrator-Guide/Puppet/index.html | 5797 ++++++++++++++ Administrator-Guide/RDMA-Transport/index.html | 4506 +++++++++++ Administrator-Guide/SSL/index.html | 4721 ++++++++++++ .../Setting-Up-Clients/index.html | 5194 +++++++++++++ .../Setting-Up-Volumes/index.html | 4964 ++++++++++++ .../index.html | 4657 ++++++++++++ .../Start-Stop-Daemon/index.html | 4712 ++++++++++++ Administrator-Guide/Storage-Pools/index.html | 4643 ++++++++++++ .../Thin-Arbiter-Volumes/index.html | 4504 +++++++++++ Administrator-Guide/Trash/index.html | 4628 ++++++++++++ .../Tuning-Volume-Options/index.html | 5316 +++++++++++++ .../arbiter-volumes-and-quorum/index.html | 4654 ++++++++++++ .../formatting-and-mounting-bricks/index.html | 4584 +++++++++++ Administrator-Guide/index.html | 4546 +++++++++++ Administrator-Guide/io_uring/index.html | 4495 +++++++++++ Administrator-Guide/overview/index.html | 4480 +++++++++++ .../setting-up-storage/index.html | 4452 +++++++++++ CLI-Reference/cli-main/index.html | 4671 ++++++++++++ CNAME | 1 + .../Adding-your-blog/index.html | 4481 +++++++++++ .../Bug-Reporting-Guidelines/index.html | 4767 ++++++++++++ Contributors-Guide/Bug-Triage/index.html | 4675 ++++++++++++ .../GlusterFS-Release-process/index.html | 4769 ++++++++++++ .../Guidelines-For-Maintainers/index.html | 4580 +++++++++++ Contributors-Guide/Index/index.html | 4545 +++++++++++ .../Backport-Guidelines/index.html | 4552 +++++++++++ Developer-guide/Building-GlusterFS/index.html | 4885 ++++++++++++ Developer-guide/Developers-Index/index.html | 4587 +++++++++++ .../Development-Workflow/index.html | 5225 +++++++++++++ 
Developer-guide/Easy-Fix-Bugs/index.html | 4500 +++++++++++ .../index.html | 4586 +++++++++++ Developer-guide/Projects/index.html | 4601 +++++++++++ .../index.html | 4868 ++++++++++++ Developer-guide/compiling-rpms/index.html | 4752 ++++++++++++ .../coredump-on-customer-setup/index.html | 4509 +++++++++++ .../gfind-missing-files/index.html | 4576 +++++++++++ GlusterFS-Tools/glusterfind/index.html | 4729 ++++++++++++ GlusterFS-Tools/index.html | 4481 +++++++++++ Install-Guide/Common-criteria/index.html | 4571 +++++++++++ Install-Guide/Community-Packages/index.html | 4910 ++++++++++++ Install-Guide/Configure/index.html | 4661 ++++++++++++ Install-Guide/Install/index.html | 4624 ++++++++++++ Install-Guide/Overview/index.html | 4677 ++++++++++++ Install-Guide/Setup-Bare-metal/index.html | 4549 +++++++++++ Install-Guide/Setup-aws/index.html | 4537 +++++++++++ Install-Guide/Setup-virt/index.html | 4513 +++++++++++ Ops-Guide/Overview/index.html | 4459 +++++++++++ Ops-Guide/Tools/index.html | 4550 +++++++++++ Quick-Start-Guide/Architecture/index.html | 5131 +++++++++++++ Quick-Start-Guide/Quickstart/index.html | 4790 ++++++++++++ Troubleshooting/gfid-to-path/index.html | 4611 +++++++++++ Troubleshooting/gluster-crash/index.html | 4446 +++++++++++ Troubleshooting/index.html | 4537 +++++++++++ .../resolving-splitbrain/index.html | 5443 +++++++++++++ Troubleshooting/statedump/index.html | 5105 +++++++++++++ .../troubleshooting-afr/index.html | 4781 ++++++++++++ .../troubleshooting-filelocks/index.html | 4512 +++++++++++ .../troubleshooting-georep/index.html | 4718 ++++++++++++ .../troubleshooting-glusterd/index.html | 4626 ++++++++++++ .../troubleshooting-gnfs/index.html | 4761 ++++++++++++ .../troubleshooting-memory/index.html | 4448 +++++++++++ .../generic-upgrade-procedure/index.html | 4760 ++++++++++++ Upgrade-Guide/index.html | 4509 +++++++++++ Upgrade-Guide/op-version/index.html | 4533 +++++++++++ Upgrade-Guide/upgrade-to-10/index.html | 4569 +++++++++++ Upgrade-Guide/upgrade-to-11/index.html | 4571 +++++++++++ Upgrade-Guide/upgrade-to-3.10/index.html | 4719 ++++++++++++ Upgrade-Guide/upgrade-to-3.11/index.html | 4723 ++++++++++++ Upgrade-Guide/upgrade-to-3.12/index.html | 4743 ++++++++++++ Upgrade-Guide/upgrade-to-3.13/index.html | 4723 ++++++++++++ Upgrade-Guide/upgrade-to-3.5/index.html | 4698 ++++++++++++ Upgrade-Guide/upgrade-to-3.6/index.html | 4716 ++++++++++++ Upgrade-Guide/upgrade-to-3.7/index.html | 4712 ++++++++++++ Upgrade-Guide/upgrade-to-3.8/index.html | 4649 ++++++++++++ Upgrade-Guide/upgrade-to-3.9/index.html | 4484 +++++++++++ Upgrade-Guide/upgrade-to-4.0/index.html | 4724 ++++++++++++ Upgrade-Guide/upgrade-to-4.1/index.html | 4746 ++++++++++++ Upgrade-Guide/upgrade-to-5/index.html | 4531 +++++++++++ Upgrade-Guide/upgrade-to-6/index.html | 4632 ++++++++++++ Upgrade-Guide/upgrade-to-7/index.html | 4556 +++++++++++ Upgrade-Guide/upgrade-to-8/index.html | 4578 +++++++++++ Upgrade-Guide/upgrade-to-9/index.html | 4569 +++++++++++ analytics.txt | 1 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.51d95adb.min.js | 29 + assets/javascripts/bundle.51d95adb.min.js.map | 8 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.hi.min.js | 
1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.e5c33ebb.min.js | 42 + .../workers/search.e5c33ebb.min.js.map | 8 + assets/stylesheets/main.558e4712.min.css | 1 + assets/stylesheets/main.558e4712.min.css.map | 1 + assets/stylesheets/palette.2505c338.min.css | 1 + .../stylesheets/palette.2505c338.min.css.map | 1 + css/custom.css | 3 + glossary/index.html | 4599 +++++++++++ google64817fdc11b2f6b6.html | 1 + images/640px-GlusterFS-Architecture.png | Bin 0 -> 97477 bytes images/Bugzilla-watching-bugs@gluster.org.png | Bin 0 -> 13334 bytes images/Bugzilla-watching.png | Bin 0 -> 11400 bytes images/Distribute.png | Bin 0 -> 37440 bytes images/Distributed-Replicated-Volume.png | Bin 0 -> 62929 bytes .../Distributed-Striped-Replicated-Volume.png | Bin 0 -> 57210 bytes images/Distributed-Striped-Volume.png | Bin 0 -> 53781 bytes images/Distributed-Volume.png | Bin 0 -> 47211 bytes images/FUSE-access.png | Bin 0 -> 126725 bytes images/FUSE-structure.png | Bin 0 -> 36167 bytes images/First-translator.png | Bin 0 -> 7721 bytes images/Geo-Rep-LAN.png | Bin 0 -> 163417 bytes images/Geo-Rep-WAN.png | Bin 0 -> 96291 bytes images/Geo-Rep03-Internet.png | Bin 0 -> 131824 bytes images/Geo-Rep04-Cascading.png | Bin 0 -> 187341 bytes images/Geo-replication-async.jpg | Bin 0 -> 30403 bytes images/Geo-replication-sync.png | Bin 0 -> 14373 bytes images/GlusterFS-Architecture.png | Bin 0 -> 133597 bytes images/GlusterFS_Translator_Stack.png | Bin 0 -> 169527 bytes images/Graph.png | Bin 0 -> 18112 bytes images/Hadoop-Architecture.png | Bin 0 -> 43815 bytes images/Libgfapi-access.png | Bin 0 -> 99779 bytes images/New-DispersedVol.png | Bin 0 -> 56418 bytes images/New-Distributed-DisperseVol.png | Bin 0 -> 65543 bytes images/New-Distributed-ReplicatedVol.png | Bin 0 -> 69137 bytes images/New-DistributedVol.png | Bin 0 -> 54240 bytes images/New-ReplicatedVol.png | Bin 0 -> 53975 bytes images/Overallprocess.png | Bin 0 -> 40598 bytes images/Replicated-Volume.png | Bin 0 -> 44077 bytes images/Striped-Replicated-Volume.png | Bin 0 -> 62113 bytes images/Striped-Volume.png | Bin 0 -> 43316 bytes images/Translator-h.png | Bin 0 -> 114830 bytes images/Translator.png | Bin 0 -> 1927 bytes images/UFO-Architecture.png | Bin 0 -> 72139 bytes images/VSA-Architecture.png | Bin 0 -> 38875 bytes images/favicon.ico | Bin 0 -> 15406 bytes images/icon.svg | 19 + images/logo.png | Bin 0 -> 40924 bytes images/url.png | Bin 0 -> 50689 bytes index.html | 4482 +++++++++++ js/custom-features.js | 40 + presentations/index.html | 5120 +++++++++++++ release-notes/10.0/index.html | 4724 
++++++++++++ release-notes/10.1/index.html | 4545 +++++++++++ release-notes/10.2/index.html | 4537 +++++++++++ release-notes/10.3/index.html | 4536 +++++++++++ release-notes/10.4/index.html | 4541 +++++++++++ release-notes/11.0/index.html | 4658 ++++++++++++ release-notes/11.1/index.html | 4546 +++++++++++ release-notes/3.10.0/index.html | 5167 +++++++++++++ release-notes/3.10.1/index.html | 4558 +++++++++++ release-notes/3.10.10/index.html | 4528 +++++++++++ release-notes/3.10.11/index.html | 4531 +++++++++++ release-notes/3.10.12/index.html | 4545 +++++++++++ release-notes/3.10.2/index.html | 4583 +++++++++++ release-notes/3.10.3/index.html | 4552 +++++++++++ release-notes/3.10.4/index.html | 4553 +++++++++++ release-notes/3.10.5/index.html | 4562 +++++++++++ release-notes/3.10.6/index.html | 4558 +++++++++++ release-notes/3.10.7/index.html | 4547 +++++++++++ release-notes/3.10.8/index.html | 4545 +++++++++++ release-notes/3.10.9/index.html | 4544 +++++++++++ release-notes/3.11.0/index.html | 5222 +++++++++++++ release-notes/3.11.1/index.html | 4631 ++++++++++++ release-notes/3.11.2/index.html | 4555 +++++++++++ release-notes/3.11.3/index.html | 4544 +++++++++++ release-notes/3.12.0/index.html | 5046 +++++++++++++ release-notes/3.12.1/index.html | 4548 +++++++++++ release-notes/3.12.10/index.html | 4533 +++++++++++ release-notes/3.12.11/index.html | 4532 +++++++++++ release-notes/3.12.12/index.html | 4532 +++++++++++ release-notes/3.12.13/index.html | 4528 +++++++++++ release-notes/3.12.14/index.html | 4564 +++++++++++ release-notes/3.12.15/index.html | 4530 +++++++++++ release-notes/3.12.2/index.html | 4579 +++++++++++ release-notes/3.12.3/index.html | 4569 +++++++++++ release-notes/3.12.4/index.html | 4530 +++++++++++ release-notes/3.12.5/index.html | 4528 +++++++++++ release-notes/3.12.6/index.html | 4533 +++++++++++ release-notes/3.12.7/index.html | 4513 +++++++++++ release-notes/3.12.8/index.html | 4498 +++++++++++ release-notes/3.12.9/index.html | 4536 +++++++++++ release-notes/3.13.0/index.html | 5059 +++++++++++++ release-notes/3.13.1/index.html | 4540 +++++++++++ release-notes/3.13.2/index.html | 4534 +++++++++++ release-notes/3.5.0/index.html | 4830 ++++++++++++ release-notes/3.5.1/index.html | 4635 ++++++++++++ release-notes/3.5.2/index.html | 4586 +++++++++++ release-notes/3.5.3/index.html | 4600 +++++++++++ release-notes/3.5.4/index.html | 4594 +++++++++++ release-notes/3.6.0/index.html | 4863 ++++++++++++ release-notes/3.6.3/index.html | 4596 +++++++++++ release-notes/3.7.0/index.html | 4865 ++++++++++++ release-notes/3.7.1/index.html | 4605 +++++++++++ release-notes/3.9.0/index.html | 5433 +++++++++++++ release-notes/4.0.0/index.html | 5929 +++++++++++++++ release-notes/4.0.1/index.html | 4528 +++++++++++ release-notes/4.0.2/index.html | 4542 +++++++++++ release-notes/4.1.0/index.html | 5354 +++++++++++++ release-notes/4.1.1/index.html | 4531 +++++++++++ release-notes/4.1.10/index.html | 4527 +++++++++++ release-notes/4.1.2/index.html | 4538 +++++++++++ release-notes/4.1.3/index.html | 4541 +++++++++++ release-notes/4.1.4/index.html | 4559 +++++++++++ release-notes/4.1.5/index.html | 4530 +++++++++++ release-notes/4.1.6/index.html | 4560 +++++++++++ release-notes/4.1.7/index.html | 4530 +++++++++++ release-notes/4.1.8/index.html | 4530 +++++++++++ release-notes/4.1.9/index.html | 4529 +++++++++++ release-notes/5.0/index.html | 5125 +++++++++++++ release-notes/5.1/index.html | 4556 +++++++++++ release-notes/5.10/index.html | 4527 +++++++++++ release-notes/5.11/index.html | 
4526 +++++++++++ release-notes/5.12/index.html | 4537 +++++++++++ release-notes/5.13/index.html | 4537 +++++++++++ release-notes/5.2/index.html | 4529 +++++++++++ release-notes/5.3/index.html | 4533 +++++++++++ release-notes/5.5/index.html | 4544 +++++++++++ release-notes/5.6/index.html | 4536 +++++++++++ release-notes/5.8/index.html | 4538 +++++++++++ release-notes/5.9/index.html | 4526 +++++++++++ release-notes/6.0/index.html | 5177 +++++++++++++ release-notes/6.1/index.html | 4545 +++++++++++ release-notes/6.10/index.html | 4504 +++++++++++ release-notes/6.2/index.html | 4538 +++++++++++ release-notes/6.3/index.html | 4525 +++++++++++ release-notes/6.4/index.html | 4543 +++++++++++ release-notes/6.5/index.html | 4531 +++++++++++ release-notes/6.6/index.html | 4559 +++++++++++ release-notes/6.7/index.html | 4543 +++++++++++ release-notes/6.8/index.html | 4542 +++++++++++ release-notes/6.9/index.html | 4535 +++++++++++ release-notes/7.0/index.html | 4909 ++++++++++++ release-notes/7.1/index.html | 4602 +++++++++++ release-notes/7.2/index.html | 4531 +++++++++++ release-notes/7.3/index.html | 4533 +++++++++++ release-notes/7.4/index.html | 4533 +++++++++++ release-notes/7.5/index.html | 4530 +++++++++++ release-notes/7.6/index.html | 4529 +++++++++++ release-notes/7.7/index.html | 4533 +++++++++++ release-notes/7.8/index.html | 4541 +++++++++++ release-notes/7.9/index.html | 4534 +++++++++++ release-notes/8.0/index.html | 4932 ++++++++++++ release-notes/8.1/index.html | 4541 +++++++++++ release-notes/8.2/index.html | 4532 +++++++++++ release-notes/8.3/index.html | 4541 +++++++++++ release-notes/8.4/index.html | 4537 +++++++++++ release-notes/8.5/index.html | 4538 +++++++++++ release-notes/8.6/index.html | 4536 +++++++++++ release-notes/9.0/index.html | 4824 ++++++++++++ release-notes/9.1/index.html | 4540 +++++++++++ release-notes/9.2/index.html | 4536 +++++++++++ release-notes/9.3/index.html | 4540 +++++++++++ release-notes/9.4/index.html | 4540 +++++++++++ release-notes/9.5/index.html | 4536 +++++++++++ release-notes/9.6/index.html | 4535 +++++++++++ release-notes/geo-rep-in-3.7/index.html | 4697 ++++++++++++ .../glusterfs-selinux2.0.1/index.html | 4456 +++++++++++ release-notes/index.html | 4916 ++++++++++++ search/search_index.json | 1 + security/index.html | 4426 +++++++++++ sitemap.xml | 1168 +++ sitemap.xml.gz | Bin 0 -> 1830 bytes 319 files changed, 1101777 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 Administrator-Guide/Access-Control-Lists/index.html create mode 100644 Administrator-Guide/Accessing-Gluster-from-Windows/index.html create mode 100644 Administrator-Guide/Automatic-File-Replication/index.html create mode 100644 Administrator-Guide/Bareos/index.html create mode 100644 Administrator-Guide/Brick-Naming-Conventions/index.html create mode 100644 Administrator-Guide/Building-QEMU-With-gfapi-For-Debian-Based-Systems/index.html create mode 100644 Administrator-Guide/Consul/index.html create mode 100644 Administrator-Guide/Directory-Quota/index.html create mode 100644 Administrator-Guide/Events-APIs/index.html create mode 100644 Administrator-Guide/Export-And-Netgroup-Authentication/index.html create mode 100644 Administrator-Guide/Geo-Replication/index.html create mode 100644 Administrator-Guide/Gluster-On-ZFS/index.html create mode 100644 Administrator-Guide/GlusterFS-Cinder/index.html create mode 100644 Administrator-Guide/GlusterFS-Coreutils/index.html create mode 100644 Administrator-Guide/GlusterFS-Filter/index.html create 
mode 100644 Administrator-Guide/GlusterFS-Introduction/index.html create mode 100644 Administrator-Guide/GlusterFS-Keystone-Quickstart/index.html create mode 100644 Administrator-Guide/GlusterFS-iSCSI/index.html create mode 100644 Administrator-Guide/Handling-of-users-with-many-groups/index.html create mode 100644 Administrator-Guide/Hook-scripts/index.html create mode 100644 Administrator-Guide/Linux-Kernel-Tuning/index.html create mode 100644 Administrator-Guide/Logging/index.html create mode 100644 Administrator-Guide/Managing-Snapshots/index.html create mode 100644 Administrator-Guide/Managing-Volumes/index.html create mode 100644 Administrator-Guide/Mandatory-Locks/index.html create mode 100644 Administrator-Guide/Monitoring-Workload/index.html create mode 100644 Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/index.html create mode 100644 Administrator-Guide/Network-Configurations-Techniques/index.html create mode 100644 Administrator-Guide/Object-Storage/index.html create mode 100644 Administrator-Guide/Performance-Testing/index.html create mode 100644 Administrator-Guide/Performance-Tuning/index.html create mode 100644 Administrator-Guide/Puppet/index.html create mode 100644 Administrator-Guide/RDMA-Transport/index.html create mode 100644 Administrator-Guide/SSL/index.html create mode 100644 Administrator-Guide/Setting-Up-Clients/index.html create mode 100644 Administrator-Guide/Setting-Up-Volumes/index.html create mode 100644 Administrator-Guide/Split-brain-and-ways-to-deal-with-it/index.html create mode 100644 Administrator-Guide/Start-Stop-Daemon/index.html create mode 100644 Administrator-Guide/Storage-Pools/index.html create mode 100644 Administrator-Guide/Thin-Arbiter-Volumes/index.html create mode 100644 Administrator-Guide/Trash/index.html create mode 100644 Administrator-Guide/Tuning-Volume-Options/index.html create mode 100644 Administrator-Guide/arbiter-volumes-and-quorum/index.html create mode 100644 Administrator-Guide/formatting-and-mounting-bricks/index.html create mode 100644 Administrator-Guide/index.html create mode 100644 Administrator-Guide/io_uring/index.html create mode 100644 Administrator-Guide/overview/index.html create mode 100644 Administrator-Guide/setting-up-storage/index.html create mode 100644 CLI-Reference/cli-main/index.html create mode 100644 CNAME create mode 100644 Contributors-Guide/Adding-your-blog/index.html create mode 100644 Contributors-Guide/Bug-Reporting-Guidelines/index.html create mode 100644 Contributors-Guide/Bug-Triage/index.html create mode 100644 Contributors-Guide/GlusterFS-Release-process/index.html create mode 100644 Contributors-Guide/Guidelines-For-Maintainers/index.html create mode 100644 Contributors-Guide/Index/index.html create mode 100644 Developer-guide/Backport-Guidelines/index.html create mode 100644 Developer-guide/Building-GlusterFS/index.html create mode 100644 Developer-guide/Developers-Index/index.html create mode 100644 Developer-guide/Development-Workflow/index.html create mode 100644 Developer-guide/Easy-Fix-Bugs/index.html create mode 100644 Developer-guide/Fixing-issues-reported-by-tools-for-static-code-analysis/index.html create mode 100644 Developer-guide/Projects/index.html create mode 100644 Developer-guide/Simplified-Development-Workflow/index.html create mode 100644 Developer-guide/compiling-rpms/index.html create mode 100644 Developer-guide/coredump-on-customer-setup/index.html create mode 100644 GlusterFS-Tools/gfind-missing-files/index.html create mode 100644 
GlusterFS-Tools/glusterfind/index.html create mode 100644 GlusterFS-Tools/index.html create mode 100644 Install-Guide/Common-criteria/index.html create mode 100644 Install-Guide/Community-Packages/index.html create mode 100644 Install-Guide/Configure/index.html create mode 100644 Install-Guide/Install/index.html create mode 100644 Install-Guide/Overview/index.html create mode 100644 Install-Guide/Setup-Bare-metal/index.html create mode 100644 Install-Guide/Setup-aws/index.html create mode 100644 Install-Guide/Setup-virt/index.html create mode 100644 Ops-Guide/Overview/index.html create mode 100644 Ops-Guide/Tools/index.html create mode 100644 Quick-Start-Guide/Architecture/index.html create mode 100644 Quick-Start-Guide/Quickstart/index.html create mode 100644 Troubleshooting/gfid-to-path/index.html create mode 100644 Troubleshooting/gluster-crash/index.html create mode 100644 Troubleshooting/index.html create mode 100644 Troubleshooting/resolving-splitbrain/index.html create mode 100644 Troubleshooting/statedump/index.html create mode 100644 Troubleshooting/troubleshooting-afr/index.html create mode 100644 Troubleshooting/troubleshooting-filelocks/index.html create mode 100644 Troubleshooting/troubleshooting-georep/index.html create mode 100644 Troubleshooting/troubleshooting-glusterd/index.html create mode 100644 Troubleshooting/troubleshooting-gnfs/index.html create mode 100644 Troubleshooting/troubleshooting-memory/index.html create mode 100644 Upgrade-Guide/generic-upgrade-procedure/index.html create mode 100644 Upgrade-Guide/index.html create mode 100644 Upgrade-Guide/op-version/index.html create mode 100644 Upgrade-Guide/upgrade-to-10/index.html create mode 100644 Upgrade-Guide/upgrade-to-11/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.10/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.11/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.12/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.13/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.5/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.6/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.7/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.8/index.html create mode 100644 Upgrade-Guide/upgrade-to-3.9/index.html create mode 100644 Upgrade-Guide/upgrade-to-4.0/index.html create mode 100644 Upgrade-Guide/upgrade-to-4.1/index.html create mode 100644 Upgrade-Guide/upgrade-to-5/index.html create mode 100644 Upgrade-Guide/upgrade-to-6/index.html create mode 100644 Upgrade-Guide/upgrade-to-7/index.html create mode 100644 Upgrade-Guide/upgrade-to-8/index.html create mode 100644 Upgrade-Guide/upgrade-to-9/index.html create mode 100644 analytics.txt create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.51d95adb.min.js create mode 100644 assets/javascripts/bundle.51d95adb.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 
assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.e5c33ebb.min.js create mode 100644 assets/javascripts/workers/search.e5c33ebb.min.js.map create mode 100644 assets/stylesheets/main.558e4712.min.css create mode 100644 assets/stylesheets/main.558e4712.min.css.map create mode 100644 assets/stylesheets/palette.2505c338.min.css create mode 100644 assets/stylesheets/palette.2505c338.min.css.map create mode 100644 css/custom.css create mode 100644 glossary/index.html create mode 100644 google64817fdc11b2f6b6.html create mode 100644 images/640px-GlusterFS-Architecture.png create mode 100644 images/Bugzilla-watching-bugs@gluster.org.png create mode 100644 images/Bugzilla-watching.png create mode 100644 images/Distribute.png create mode 100644 images/Distributed-Replicated-Volume.png create mode 100644 images/Distributed-Striped-Replicated-Volume.png create mode 100644 images/Distributed-Striped-Volume.png create mode 100644 images/Distributed-Volume.png create mode 100644 images/FUSE-access.png create mode 100644 images/FUSE-structure.png create mode 100644 images/First-translator.png create mode 100644 images/Geo-Rep-LAN.png create mode 100644 images/Geo-Rep-WAN.png create mode 100644 images/Geo-Rep03-Internet.png create mode 100644 images/Geo-Rep04-Cascading.png create mode 100644 images/Geo-replication-async.jpg create mode 100644 images/Geo-replication-sync.png create mode 100644 images/GlusterFS-Architecture.png create mode 100644 images/GlusterFS_Translator_Stack.png create mode 100644 images/Graph.png create mode 100644 images/Hadoop-Architecture.png create mode 100644 images/Libgfapi-access.png create mode 100644 images/New-DispersedVol.png create mode 100644 images/New-Distributed-DisperseVol.png create mode 100644 images/New-Distributed-ReplicatedVol.png create mode 100644 images/New-DistributedVol.png create mode 100644 images/New-ReplicatedVol.png create mode 100644 images/Overallprocess.png create mode 100644 images/Replicated-Volume.png create mode 100644 images/Striped-Replicated-Volume.png create mode 100644 images/Striped-Volume.png create mode 100644 images/Translator-h.png create mode 100644 images/Translator.png create mode 100644 images/UFO-Architecture.png create mode 100644 images/VSA-Architecture.png create mode 100644 images/favicon.ico create mode 100644 images/icon.svg create mode 100644 images/logo.png create mode 100644 images/url.png create mode 100644 index.html create mode 100644 
js/custom-features.js create mode 100644 presentations/index.html create mode 100644 release-notes/10.0/index.html create mode 100644 release-notes/10.1/index.html create mode 100644 release-notes/10.2/index.html create mode 100644 release-notes/10.3/index.html create mode 100644 release-notes/10.4/index.html create mode 100644 release-notes/11.0/index.html create mode 100644 release-notes/11.1/index.html create mode 100644 release-notes/3.10.0/index.html create mode 100644 release-notes/3.10.1/index.html create mode 100644 release-notes/3.10.10/index.html create mode 100644 release-notes/3.10.11/index.html create mode 100644 release-notes/3.10.12/index.html create mode 100644 release-notes/3.10.2/index.html create mode 100644 release-notes/3.10.3/index.html create mode 100644 release-notes/3.10.4/index.html create mode 100644 release-notes/3.10.5/index.html create mode 100644 release-notes/3.10.6/index.html create mode 100644 release-notes/3.10.7/index.html create mode 100644 release-notes/3.10.8/index.html create mode 100644 release-notes/3.10.9/index.html create mode 100644 release-notes/3.11.0/index.html create mode 100644 release-notes/3.11.1/index.html create mode 100644 release-notes/3.11.2/index.html create mode 100644 release-notes/3.11.3/index.html create mode 100644 release-notes/3.12.0/index.html create mode 100644 release-notes/3.12.1/index.html create mode 100644 release-notes/3.12.10/index.html create mode 100644 release-notes/3.12.11/index.html create mode 100644 release-notes/3.12.12/index.html create mode 100644 release-notes/3.12.13/index.html create mode 100644 release-notes/3.12.14/index.html create mode 100644 release-notes/3.12.15/index.html create mode 100644 release-notes/3.12.2/index.html create mode 100644 release-notes/3.12.3/index.html create mode 100644 release-notes/3.12.4/index.html create mode 100644 release-notes/3.12.5/index.html create mode 100644 release-notes/3.12.6/index.html create mode 100644 release-notes/3.12.7/index.html create mode 100644 release-notes/3.12.8/index.html create mode 100644 release-notes/3.12.9/index.html create mode 100644 release-notes/3.13.0/index.html create mode 100644 release-notes/3.13.1/index.html create mode 100644 release-notes/3.13.2/index.html create mode 100644 release-notes/3.5.0/index.html create mode 100644 release-notes/3.5.1/index.html create mode 100644 release-notes/3.5.2/index.html create mode 100644 release-notes/3.5.3/index.html create mode 100644 release-notes/3.5.4/index.html create mode 100644 release-notes/3.6.0/index.html create mode 100644 release-notes/3.6.3/index.html create mode 100644 release-notes/3.7.0/index.html create mode 100644 release-notes/3.7.1/index.html create mode 100644 release-notes/3.9.0/index.html create mode 100644 release-notes/4.0.0/index.html create mode 100644 release-notes/4.0.1/index.html create mode 100644 release-notes/4.0.2/index.html create mode 100644 release-notes/4.1.0/index.html create mode 100644 release-notes/4.1.1/index.html create mode 100644 release-notes/4.1.10/index.html create mode 100644 release-notes/4.1.2/index.html create mode 100644 release-notes/4.1.3/index.html create mode 100644 release-notes/4.1.4/index.html create mode 100644 release-notes/4.1.5/index.html create mode 100644 release-notes/4.1.6/index.html create mode 100644 release-notes/4.1.7/index.html create mode 100644 release-notes/4.1.8/index.html create mode 100644 release-notes/4.1.9/index.html create mode 100644 release-notes/5.0/index.html create mode 100644 release-notes/5.1/index.html 
create mode 100644 release-notes/5.10/index.html create mode 100644 release-notes/5.11/index.html create mode 100644 release-notes/5.12/index.html create mode 100644 release-notes/5.13/index.html create mode 100644 release-notes/5.2/index.html create mode 100644 release-notes/5.3/index.html create mode 100644 release-notes/5.5/index.html create mode 100644 release-notes/5.6/index.html create mode 100644 release-notes/5.8/index.html create mode 100644 release-notes/5.9/index.html create mode 100644 release-notes/6.0/index.html create mode 100644 release-notes/6.1/index.html create mode 100644 release-notes/6.10/index.html create mode 100644 release-notes/6.2/index.html create mode 100644 release-notes/6.3/index.html create mode 100644 release-notes/6.4/index.html create mode 100644 release-notes/6.5/index.html create mode 100644 release-notes/6.6/index.html create mode 100644 release-notes/6.7/index.html create mode 100644 release-notes/6.8/index.html create mode 100644 release-notes/6.9/index.html create mode 100644 release-notes/7.0/index.html create mode 100644 release-notes/7.1/index.html create mode 100644 release-notes/7.2/index.html create mode 100644 release-notes/7.3/index.html create mode 100644 release-notes/7.4/index.html create mode 100644 release-notes/7.5/index.html create mode 100644 release-notes/7.6/index.html create mode 100644 release-notes/7.7/index.html create mode 100644 release-notes/7.8/index.html create mode 100644 release-notes/7.9/index.html create mode 100644 release-notes/8.0/index.html create mode 100644 release-notes/8.1/index.html create mode 100644 release-notes/8.2/index.html create mode 100644 release-notes/8.3/index.html create mode 100644 release-notes/8.4/index.html create mode 100644 release-notes/8.5/index.html create mode 100644 release-notes/8.6/index.html create mode 100644 release-notes/9.0/index.html create mode 100644 release-notes/9.1/index.html create mode 100644 release-notes/9.2/index.html create mode 100644 release-notes/9.3/index.html create mode 100644 release-notes/9.4/index.html create mode 100644 release-notes/9.5/index.html create mode 100644 release-notes/9.6/index.html create mode 100644 release-notes/geo-rep-in-3.7/index.html create mode 100644 release-notes/glusterfs-selinux2.0.1/index.html create mode 100644 release-notes/index.html create mode 100644 search/search_index.json create mode 100644 security/index.html create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..386d311a --- /dev/null +++ b/404.html @@ -0,0 +1,4398 @@ + + + + + + + + + + + + + + + + + + + + Gluster Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
404 - Not found

+ + + + + + + + + + + \ No newline at end of file diff --git a/Administrator-Guide/Access-Control-Lists/index.html b/Administrator-Guide/Access-Control-Lists/index.html new file mode 100644 index 00000000..0c24043b --- /dev/null +++ b/Administrator-Guide/Access-Control-Lists/index.html @@ -0,0 +1,4832 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Access Control Lists - Gluster Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

POSIX Access Control Lists

+

POSIX Access Control Lists (ACLs) allow you to assign different permissions for different users or groups even though they do not correspond to the original owner or the owning group.

+

For example: User john creates a file but does not want to allow anyone +to do anything with this file, except another user, antony (even though +there are other users that belong to the group john).

+

This means, in addition to the file owner, the file group, and others, +additional users and groups can be granted or denied access by using +POSIX ACLs.

+

Activating POSIX ACLs Support

+

To use POSIX ACLs for a file or directory, the partition of the file or +directory must be mounted with POSIX ACLs support.

+

Activating POSIX ACLs Support on Server

+

To mount the backend export directories for POSIX ACLs support, use the +following command:

+
mount -o acl <device-name> <mount-point>
+
+

For example:

+
mount -o acl /dev/sda1 /export1
+
+

Alternatively, if the partition is listed in the /etc/fstab file, add +the following entry for the partition to include the POSIX ACLs option:

+
LABEL=/work /export1 ext3 rw,acl 1 4
+
+
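If the brick partition is already mounted, ACL support can usually be enabled without unmounting by remounting it. This is a minimal sketch reusing the /export1 mount point from the example above:

mount -o remount,acl /export1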

Activating POSIX ACLs Support on Client

+

To mount the glusterfs volumes for POSIX ACLs support, use the following +command:

+
mount -t glusterfs -o acl <server>:<volume-name> <mount-point>
+
+

For example:

+
mount -t glusterfs -o acl 198.192.198.234:glustervolume /mnt/gluster
+
+

Setting POSIX ACLs

+

You can set two types of POSIX ACLs, that is, access ACLs and default ACLs. You can use access ACLs to grant permission for a specific file or directory. You can use default ACLs only on a directory, but if a file inside that directory does not have an ACL, it inherits the permissions of the default ACLs of the directory.

+

You can set ACLs per user, per group, for users not in the owning group of the file, and via the effective rights mask.

+

Setting Access ACLs

+

You can apply access ACLs to grant permission for both files and +directories.

+

To set or modify Access ACLs

+

To set or modify access ACLs, use the following command:

+
setfacl -m <entry-type> <file>
+
+

The ACL entry types are the POSIX ACLs representations of owner, group, +and other.

+

Permissions must be a combination of the characters r (read), w +(write), and x (execute). You must specify the ACL entry in the +following format and can specify multiple entry types separated by +commas.

+ + + + + + + + + + + + + + + + + + + + + + + + + +
ACL Entry              Description
u:uid:<permission>     Sets the access ACLs for a user. You can specify the user name or UID.
g:gid:<permission>     Sets the access ACLs for a group. You can specify the group name or GID.
m:<permission>         Sets the effective rights mask. The mask is the combination of all access permissions of the owning group and all of the user and group entries.
o:<permission>         Sets the access ACLs for users other than the ones in the group for the file.
+

If a file or directory already has a POSIX ACL, and the setfacl command is used, the additional permissions are added to the existing POSIX ACL or the existing rule is modified.

+

For example, to give read and write permissions to user antony:

+
setfacl -m u:antony:rw /mnt/gluster/data/testfile
+
+
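Multiple ACL entries can also be set in one setfacl invocation by separating them with commas. This is a sketch reusing the example file above; the group name staff is only an illustration:

setfacl -m u:antony:rw,g:staff:r /mnt/gluster/data/testfile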

Setting Default ACLs

+

You can apply default ACLs only to directories. They determine the permissions that a file system object inherits from its parent directory when it is created.

+

To set default ACLs

+

You can set default ACLs for directories using the following command:

+
setfacl -m --set <entry-type> <directory>
+
+

Permissions must be a combination of the characters r (read), w (write), and x (execute). Specify the ACL entry_type as described below, separating multiple entry types with commas.

+

u:user_name:permissions +Sets the access ACLs for a user. Specify the user name, or the UID.

+

g:group_name:permissions +Sets the access ACLs for a group. Specify the group name, or the GID.

+

m:permission +Sets the effective rights mask. The mask is the combination of all access permissions of the owning group, and all user and group entries.

+

o:permissions +Sets the access ACLs for users other than the ones in the group for the file.

+

For example, to set the default ACLs for the /data directory to read for +users not in the user group:

+
setfacl -m --set o::r /mnt/gluster/data
+
+
+

Note

+

An access ACL set for an individual file can override the default ACL permissions.

+
+

Effects of a Default ACLs

+

The following are the ways in which the permissions of a directory's +default ACLs are passed to the files and subdirectories in it:

+
    +
  • A subdirectory inherits the default ACLs of the parent directory both as its default ACLs and as its access ACLs.
  • A file inherits the default ACLs as its access ACLs.
+
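The inheritance can be verified with a quick check. This sketch assumes the default ACLs were set on /mnt/gluster/data as in the earlier example:

mkdir /mnt/gluster/data/subdir    # inherits the default ACLs both as default and as access ACLs
touch /mnt/gluster/data/newfile   # inherits the default ACLs as its access ACLs
getfacl /mnt/gluster/data/subdir
getfacl /mnt/gluster/data/newfile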

Retrieving POSIX ACLs

+

You can view the existing POSIX ACLs for a file or directory.

+

To view existing POSIX ACLs

+
    +
  • View the existing access ACLs of a file using the following command:
  • +
+
getfacl <file>
+
+

For example, to view the existing POSIX ACLs for sample.jpg

+
getfacl /mnt/gluster/data/test/sample.jpg
+
+
owner: antony
+group: antony
+user::rw-
+group::rw-
+other::r--
+
+
    +
  • View the default ACLs of a directory using the following command:
  • +
+
getfacl <directory>
+
+

For example, to view the existing ACLs for /data/doc

+
getfacl /mnt/gluster/data/doc
+
+
owner: antony
+group: antony
+user::rw-
+user:john:r--
+group::r--
+mask::r--
+other::r--
+default:user::rwx
+default:user:antony:rwx
+default:group::r-x
+default:mask::rwx
+default:other::r-x
+
+

Removing POSIX ACLs

+

To remove all the permissions for a user, groups, or others, use the +following command:

+
setfacl -x <entry-type> <file>
+
+

setfacl entry_type Options

+

The ACL entry_type translates to the POSIX ACL representations of owner, group, and other.

+

Permissions must be a combination of the characters r (read), w (write), and x (execute). Specify the ACL entry_type as described below, separating multiple entry types with commas.

+

u:user_name +Sets the access ACLs for a user. Specify the user name, or the UID.

+

g:group_name +Sets the access ACLs for a group. Specify the group name, or the GID.

+

m:permission +Sets the effective rights mask. The mask is the combination of all access permissions of the owning group, and all user and group entries.

+

o:permissions +Sets the access ACLs for users other than the ones in the group for the file.

+

For example, to remove all permissions from the user antony:

+
setfacl -x u:antony /mnt/gluster/data/test-file
+
+
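The setfacl utility also provides options to clear ACLs wholesale, which can be handy alongside the per-entry removal shown above. This is a sketch using the same example paths; -b and -k are standard setfacl options, not Gluster-specific:

setfacl -b /mnt/gluster/data/test-file   # remove all extended ACL entries from a file
setfacl -k /mnt/gluster/data             # remove the default ACLs from a directory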

Samba and ACLs

+

If you are using Samba to access GlusterFS FUSE mount, then POSIX ACLs +are enabled by default. Samba has been compiled with the +--with-acl-support option, so no special flags are required when +accessing or mounting a Samba share.

+

NFS and ACLs

+

Currently GlusterFS supports POSIX ACL configuration through NFS mount, +i.e. setfacl and getfacl commands work through NFS mount.
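
For example, to mount the volume over Gluster NFS with ACL support (a sketch that reuses the server and volume name from the earlier FUSE example, and assumes the volume's NFS export is available):

mount -t nfs -o vers=3,acl 198.192.198.234:/glustervolume /mnt/nfs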

+ + + + + + + + + + + \ No newline at end of file diff --git a/Administrator-Guide/Accessing-Gluster-from-Windows/index.html b/Administrator-Guide/Accessing-Gluster-from-Windows/index.html new file mode 100644 index 00000000..61d23f32 --- /dev/null +++ b/Administrator-Guide/Accessing-Gluster-from-Windows/index.html @@ -0,0 +1,4638 @@ + + + + + + + + + + + + + + + + + + + + + + Accessing Gluster volume via SMB Protocol - Gluster Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Accessing Gluster volume via SMB Protocol

+

The layered product Samba is used to export the Gluster volume, and ctdb provides high availability for Samba. Here are the steps to configure a highly available Samba cluster to export a Gluster volume.

+

Note: These configuration steps are applicable to Samba version >= 4.1, Gluster version >= 3.7, and ctdb >= 2.5.

+

Step 1: Choose the servers that will export the Gluster volume.

+

The servers may or may not be part of the trusted storage pool. The preferred number of servers is <= 4. Install the Samba and ctdb packages on these servers.
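
For example, on an RPM-based distribution the packages can be installed as below (a sketch; exact package names, such as samba-vfs-glusterfs for the Gluster VFS module, may vary by distribution and repository):

# yum install samba samba-vfs-glusterfs ctdb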

+

Step 2: Enable/Disable the auto export of Gluster volume via SMB

+
# gluster volume set VOLNAME user.smb disable/enable
+
+

Step 3: Setup the CTDB Cluster:

+
    +
  1. +

    Create a ctdb meta volume with replica N, N being the number of the + servers that are used as Samba servers. This volume will host only + a zero byte lock file, hence choose the minimal sized bricks. To + create the n replica volume run the following command:

    +
     # gluster volume create <volname> replica n <ipaddr/host name>:/<brick_patch>.... N times
    +
    +
  2.

    In the following files, replace "all" in the statement "META=all" + to the newly created volume name.

    +
     /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh
    + /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
    +
    +
  3.

    Start the ctdb volume

    +
    # gluster vol start <volname>
    +
    +
  4.

    Verify the following:

    +
      +
    • +

      If the following lines are added in smb.conf file in all the + nodes running samba/ctdb:

      +
      clustering = yes
      +idmap backend = tdb2
      +
      +
    • +
    • +

      If the ctdb volume is mounted at /gluster/lock on all the + nodes that runs ctdb/samba

      +
    • +
    • If the mount entry for ctdb volume is added in /etc/fstab
    • +
    • If file /etc/sysconfig/ctdb exists on all the nodes that runs + ctdb/samba
    • +
    +
  5.

    Create /etc/ctdb/nodes files on all the nodes that runs ctdb/samba, + and add the IPs of all these nodes in the file. For example,

    +
     # cat /etc/ctdb/nodes
    + 10.16.157.0
    + 10.16.157.3
    + 10.16.157.6
    + 10.16.157.8
    +
    +

    The IPs listed here are the private IPs of Samba/ctdb servers, +which should be a private non-routable subnet and are only used for +internal cluster traffic. For more details refer to the ctdb man +page.

    +
  6.

    Create /etc/ctdb/public_addresses files on all the nodes that runs + ctdb/samba, and add the virtual IPs in the following format:

    +
     <virtual IP>/<routing prefix> <node interface>
    +
    +

    Eg:

    +
     # cat /etc/ctdb/public_addresses
    + 192.168.1.20/24 eth0
    + 192.168.1.21/24 eth0
    +
    +
  7.

    Either uncomment CTDB_SAMBA_SKIP_SHARE_CHECK=yes or add + CTDB_SAMBA_SKIP_SHARE_CHECK=yes in its absence inside + /etc/ctdb/script.options to disable checking of the shares by + ctdb.

    +
  8.

    If SELinux is enabled and enforcing, try the following command if + ctdb fails.

    +
     # setsebool -P use_fusefs_home_dirs 1
    + # setsebool -P samba_load_libgfapi 1
    +
    +

Step 4: Performance tunings before exporting the volume

+
    +
  1. +

    To ensure lock and IO coherency:

    +
    # gluster volume set VOLNAME storage.batch-fsync-delay-usec 0
    +
    +
  2.

    If using Samba 4.X version add the following line in smb.conf in + the global section

    +
     kernel share modes = no
    + kernel oplocks = no
    + map archive = no
    + map hidden = no
    + map read only = no
    + map system = no
    + store dos attributes = yes
    +
    +

    Note: Setting 'store dos attributes = no' is recommended if +archive/hidden/read-only dos attributes are not used. This can give +better performance.

    +
  3.

    If you are using gluster5 or higher execute the following to + improve performance:

    +
     # gluster volume set VOLNAME group samba
    +
    +

    On older version, please execute the following:

    +
     # gluster volume set VOLNAME features.cache-invalidation on
    + # gluster volume set VOLNAME features.cache-invalidation-timeout 600
    + # gluster volume set VOLNAME performance.cache-samba-metadata on
    + # gluster volume set VOLNAME performance.stat-prefetch on
    + # gluster volume set VOLNAME performance.cache-invalidation on
    + # gluster volume set VOLNAME performance.md-cache-timeout 600
    + # gluster volume set VOLNAME network.inode-lru-limit 200000
    + # gluster volume set VOLNAME performance.nl-cache on
    + # gluster volume set VOLNAME performance.nl-cache-timeout 600
    + # gluster volume set VOLNAME performance.readdir-ahead on
    + # gluster volume set VOLNAME performance.parallel-readdir on
    +
    +
  4.

    Tune the number of threads in gluster for better performance:

    +
    # gluster volume set VOLNAME client.event-threads 4
    +# gluster volume set VOLNAME server.event-threads 4 # Increasing to a very high value will reduce the performance
    +
    +

Step 5: Mount the volume using SMB

+
    +
  1. +

    If there is no Active Directory setup, add the user on all the Samba servers and set the password:

    +
     # adduser USERNAME
    + # smbpasswd -a USERNAME
    +
    +
  2.

    Start the ctdb, smb and other related services:

    +
    # systemctl re/start ctdb
    +# ctdb status
    +# ctdb ip
    +# ctdb ping -n all
    +
    +
  3.

    To verify if the volume exported by samba can be accessed by a + user:

    +
     # smbclient //<hostname>/gluster-<volname> -U <username>%<password>
    +
    +
  4.

    To mount on a linux system:

    +
     # mount -t cifs -o user=<username>,pass=<password> //<Virtual IP>/gluster-<volname> /<mountpoint>
    +
    +

    To mount on Windows system:

    +
     >net use <device:> \\<Virtual IP>\gluster-<volname>
    +
    +

    OR

    +
     \\<Virtual IP>\gluster-<volname>
    +
    +

    from windows explorer.

    +
+ + + + + + + + + + + \ No newline at end of file diff --git a/Administrator-Guide/Automatic-File-Replication/index.html b/Administrator-Guide/Automatic-File-Replication/index.html new file mode 100644 index 00000000..f3106190 --- /dev/null +++ b/Administrator-Guide/Automatic-File-Replication/index.html @@ -0,0 +1,4678 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Replication - Gluster Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

This doc contains information about the synchronous replication module in gluster and has two sections

+
    +
  • Replication logic
  • Self-heal logic
+

1. Replication logic

+

AFR is the module (translator) in glusterfs that provides all the features that you would expect of any synchronous replication system:

+
    +
  1. Simultaneous updating of all copies of data on the replica bricks when a client modifies it.
  2. Providing continued data availability to clients when, say, one brick of the replica set goes down.
  3. Automatic self-healing of any data that was modified when the brick that was down, once it comes back up, ensuring consistency of data on all the bricks of the replica.
+

1 and 2 are in the I/O path while 3 is done either in the I/O path (in the background) or via the self-heal daemon.

+

Each gluster translator implements what are known as File Operations (FOPs) which are mapped to the I/O syscalls which the application makes. For example, AFR has afr_writev that gets invoked when application does a write(2). As is obvious, all FOPs fall into one of 2 types:

+

i) Read-based FOPs, which only get information from and don't modify the file in any way.

+

viz: afr_readdir, afr_access, afr_stat, afr_fstat, afr_readlink, afr_getxattr, afr_fgetxattr, afr_readv,afr_seek

+

ii) Write-based FOPs, which change the file or its attributes.

+

viz: afr_create, afr_mknod,afr_mkdir,afr_link, afr_symlink, afr_rename, afr_unlink, afr_rmdir, afr_do_writev, afr_truncate, afr_ftruncate, afr_setattr, afr_fsetattr, afr_setxattr, afr_fsetxattr, afr_removexattr, afr_fremovexattr, afr_fallocate, afr_discard, afr_zerofill, afr_xattrop, afr_fxattrop, afr_fsync.

+

AFR follows a transaction model for both types of FOPs.

+

Read transactions:

+

For every file in the replica, AFR has an in-memory notion/array called ‘readables’ which indicate whether each brick of the replica is a good copy or a bad one (i.e. in need of a heal). In a healthy state, all bricks are readable and a read FOP will be served from any one of the readable bricks. The read-hash-mode volume option decides which brick is the chosen one.

+
gluster volume set help | grep read-hash-mode -A7
+
+
Option: cluster.read-hash-mode
+Default Value: 1
+Description: inode-read fops happen only on one of the bricks in replicate. AFR will prefer the one computed using the method specified using this option.
+0 = first readable child of AFR, starting from 1st child.
+1 = hash by GFID of file (all clients use same subvolume).
+2 = hash by GFID of file and client PID.
+3 = brick having the least outstanding read requests.
+
+

If the brick is bad for a given file (i.e. it is pending heal), then it won’t be marked readable to begin with. The readables array is populated based on the on-disk AFR xattrs for the file during lookup. These xattrs indicate which bricks are good and which ones are bad. We will see more about these xattrs in the write transactions section below. If the FOP fails on the chosen readable brick, AFR attempts it on the next readable one, until all are exhausted. If the FOP doesn’t succeed on any of the readables, then the application receives an error.
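
For example, to make reads for a file prefer the brick with the least outstanding read requests (a sketch; testvol is an example volume name, the same one used in the examples later in this doc):

gluster volume set testvol cluster.read-hash-mode 3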

+

Write transactions:

+

Every write based FOP employs a write transaction model which consists of 5 phases:

+

1) The lock phase +Take locks on the file being modified on all bricks so that AFRs of other clients are blocked if they try to modify the same file simultaneously.

+

2) The pre-op phase +Increment the ‘dirty’ xattr (trusted.afr.dirty) by 1 on all participating bricks as an indication of an impending FOP (in the next phase)

+

3) The FOP phase +Perform the actual FOP (say a setfattr) on all bricks.

+

4) The post-op phase +Decrement the dirty xattr by 1 on bricks where the FOP was successful. +In addition, also increment the ‘pending’ xattr (trusted.afr.$VOLNAME-client-x) xattr on the success bricks to ‘blame’ the bricks where the FOP failed.

+

5) The unlock phase +Release the locks that were taken in phase 1. Any competing client can now go ahead with its own write transaction.

+

Note: There are certain optimizations done at the code level which reduce the number of lock/unlock phases done for a transaction by piggybacking on the previous transaction's locks. These optimizations (eager-locking, piggybacking and delayed post-op) are beyond the scope of this post.

+

AFR returns success for these FOPs only if they meet quorum. For replica 2, this means it needs to succeed on any one brick. For replica 3, it is two out of three, and so on.
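
Client-side quorum enforcement is controlled through volume options such as cluster.quorum-type and cluster.quorum-count (a sketch, using the example volume name testvol):

gluster volume set testvol cluster.quorum-type auto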

+

More on the AFR xattrs:

+

We saw that AFR modifies the dirty and pending xattrs in the pre-op and post-op phases. To be more precise, only parts of the xattr are modified in a given transaction. Which bytes are modified depends on the type of write transaction which the FOP belongs to.

+ + + + + + + + + + + + + + + + + + + + + +
Transaction Type            FOPs that belong to it
AFR_DATA_TRANSACTION        afr_writev, afr_truncate, afr_ftruncate, afr_fsync, afr_fallocate, afr_discard, afr_zerofill
AFR_METADATA_TRANSACTION    afr_setattr, afr_fsetattr, afr_setxattr, afr_fsetxattr, afr_removexattr, afr_fremovexattr, afr_xattrop, afr_fxattrop
AFR_ENTRY_TRANSACTION       afr_create, afr_mknod, afr_mkdir, afr_link, afr_symlink, afr_rename, afr_unlink, afr_rmdir
+

Stop here and convince yourself that given a write based FOP, you can say which one of the 3 transaction types it belongs to.

+

Note: In the code, there is also an AFR_ENTRY_RENAME_TRANSACTION (used by afr_rename) but it is safe to assume that it is identical to AFR_ENTRY_TRANSACTION as far as interpreting the xattrs is concerned.

+

Consider the xattr trusted.afr.dirty=0x000000000000000000000000. The first 4 bytes of the xattr are used for data transactions, the next 4 bytes for metadata transactions and the last 4 for entry transactions. Let us see some examples of how the xattr would look for various types of FOPs during a transaction:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FOP            Value after pre-op phase                          Value after post-op phase
afr_writev     trusted.afr.dirty=0x00000001 00000000 00000000    trusted.afr.dirty=0x00000000 00000000 00000000
afr_setattr    trusted.afr.dirty=0x00000000 00000001 00000000    trusted.afr.dirty=0x00000000 00000000 00000000
afr_create     trusted.afr.dirty=0x00000000 00000000 00000001    trusted.afr.dirty=0x00000000 00000000 00000000
+

Thus, depending on the type of FOP (i.e. data/metadata/entry transaction), a different set of bytes of the dirty xattr gets incremented/decremented. Modification of the pending xattr also follows the same pattern, except it is incremented only in the post-op phase if the FOP fails on some bricks.

+

Example: +Let us say a write was performed on a file, say FILE1, on replica 3 volume called ‘testvol’. Suppose the lock and pre-op phase succeeded on all bricks. After that the 3rd brick went down, and the transaction completed successfully on the first 2 bricks. +What will be the state of the afr xattrs on all bricks?

+
# getfattr -d -m . -e hex /bricks/brick1/FILE1 | grep afr
+getfattr: Removing leading '/' from absolute path names
+trusted.afr.dirty=0x000000000000000000000000
+trusted.afr.testvol-client-2=0x000000010000000000000000
+
+
# getfattr -d -m . -e hex /bricks/brick2/FILE1 | grep afr
+getfattr: Removing leading '/' from absolute path names
+trusted.afr.dirty=0x000000000000000000000000
+trusted.afr.testvol-client-2=0x000000010000000000000000
+
+
# getfattr -d -m . -e hex /bricks/brick3/FILE1 | grep afr
+getfattr: Removing leading '/' from absolute path names
+trusted.afr.dirty=0x000000010000000000000000
+
+

So Brick3 will still have the dirty xattr set because it went down before the post-op had a chance to decrement it. Bricks 1 and 2 will have a zero dirty xattr and in addition, a non-zero pending xattr set. The client-2 in trusted.afr.testvol-client-2 indicates that the 3rd brick is bad and has some pending data operations.

+

2. Self-heal logic.

+

We already know that AFR increments and/or decrements the dirty (i.e. trusted.afr.dirty) and pending (i.e. trusted.afr.$VOLNAME-client-x) xattrs during the different phases of the transaction. For a given file (or directory), an all zero value of these xattrs or the total absence of these xattrs on all bricks of the replica means the file is healthy and does not need heal. If any of these xattrs is non-zero even on one of the bricks, then the file is a candidate for heal - it is as simple as that.

+

When we say these xattrs are non-zero, it is in the context of no ongoing I/O from client(s) on the file. Otherwise, the non-zero values that one observes might be transient, as the write transaction could just be progressing through its five phases. Of course, as an admin, you wouldn’t need to figure out all of this. Just running the heal info set of commands should give you the list of files that need heal.
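For instance, continuing the FILE1 example above (a sketch - the exact output format varies between Gluster versions), the bricks that blame brick 3 list the file as needing heal:

# gluster volume heal testvol info
Brick server1:/bricks/brick1
/FILE1
Number of entries: 1
...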

+

So if self-heal observes a file with non-zero xattrs, it does the following steps:

+
    +
  1. Fetch the afr xattrs and examine which set of bytes is non-zero to determine the corresponding heals that are needed on the file – i.e. data heal/metadata heal/entry heal.
  2. Determine which bricks are good (a.k.a. ‘sources’) and which ones are bad (a.k.a. ‘sinks’) for each of those heals by interpreting the xattr values.
  3. Pick one source brick and heal the file on to all the sink bricks.
  4. If the heal is successful, reset the afr xattrs to zero.
+

This is a rather simplified description and I have omitted details about various locks that each of these steps need to take because self-heal and client I/O can happen in parallel on the file. Or even multiple self-heal daemons (described later) can attempt to heal the same file.

+

Data heal: Happens only for files. The contents of the file are copied from the source to the sink bricks.

+

Entry heal: Happens only for directories. Entries (i.e. files and subdirs) under a given directory are deleted from the sinks if they are not present in the source. Likewise, entries are created on the sinks if they are present in the source.

+

Metadata heal: Happens for both files and directories. File ownership, file permissions and extended attributes are copied from the source to the sink bricks.

+

It is possible that for a given file, one set of bricks is the source for data heal while another set is the source for metadata heal. It all depends on which FOPs failed on which bricks and therefore which set of bytes is non-zero in the afr xattrs.

+

When do self-heals happen?

+

There are two places from which the steps described above for healing can be carried out:

+

i) From the client side.

+

Client-side heals are triggered when the file is accessed from the client (mount). AFR uses a monotonically increasing generation number to keep track of disconnect/connect of its children (i.e. the client translators) to the bricks. When this ‘event generation’ number changes, the file’s inode is marked as a candidate for refresh. When the next FOP comes on such an inode, a refresh is triggered to update the readables during which a heal is launched (if the AFR xattrs indicate that a heal is needed, that is). This heal happens in the background, meaning it does not block the actual FOP which will continue as usual post the refresh. Specific client-side heals can be turned off by disabling the 3 corresponding volume options:

+
cluster.metadata-self-heal
+cluster.data-self-heal
+cluster.entry-self-heal
+
+
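These are ordinary volume options; an illustrative sketch of turning them off on a hypothetical volume testvol:

gluster volume set testvol cluster.metadata-self-heal off
gluster volume set testvol cluster.data-self-heal off
gluster volume set testvol cluster.entry-self-heal off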

The number of client-side heals that happen in the background can be tuned via the following volume options:

+
background-self-heal-count
+heal-wait-queue-length
+
+

See the gluster volume set help for more information on all the above options.
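For instance (the grep pattern is just an illustration):

gluster volume set help | grep -A 3 background-self-heal-count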

+

Name heal: Name heal is just healing of the file/directory name when it is accessed. For example, say a file is created and written to when a brick is down and all the 3 client side heals are disabled. When the brick comes up and the next I/O comes on it, the file name is created on it as a part of lookup. Its contents/metadata are not healed though. Name heal cannot be disabled. It is there to ensure that the namespace is consistent on all bricks as soon as the file is accessed.

+

ii) By the self-heal daemon.

+

There is a self-heal daemon process (glustershd) that runs on every node of the trusted storage pool. It is a lightweight client process consisting mainly of AFR and the protocol/client translators. It can talk to all bricks of all the replicate volume(s) of the pool. It periodically crawls (every 10 minutes by default; tunable via the heal-timeout volume option) the list of files that need heal and heals them. As you can see, client-side heal is done upon file access, but glustershd processes the heal backlog pro-actively.
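For example - an illustrative sketch on a hypothetical volume testvol - to make glustershd crawl the backlog every 5 minutes instead of the default 10:

gluster volume set testvol cluster.heal-timeout 300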

+

Index heal:

+

But how does glustershd know which files it needs to heal? Where does it get the list from? So in part-1, while we saw the five phases of the AFR write transaction, we left out one detail:

+
    +
  • In the pre-op phase, in addition to marking the dirty xattr, each brick also stores the gfid string of the file inside its .glusterfs/indices/dirty directory.
  • Likewise, in the post-op phase, it removes the gfid string from its .glusterfs/indices/dirty directory. In addition, if the write failed on some brick, the good bricks store the gfid string inside their .glusterfs/indices/xattrop directory.
+

Thus when no I/O is happening on a file and you still find its gfid inside .glusterfs/indices/dirty of a particular brick, it means the brick went down before the post-op phase. If you find the gfid inside .glusterfs/indices/xattrop, it means the write failed on some other brick and this brick has captured it.
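To see this on the bricks themselves - a sketch using the brick paths from the FILE1 example - simply list the index directories; the entries are named after the gfids of the files that need heal:

ls /bricks/brick3/.glusterfs/indices/dirty/
ls /bricks/brick1/.glusterfs/indices/xattrop/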

+

The glustershd daemon simply reads the list of entries inside .glusterfs/indices/* and triggers heal on them. This is referred to as index heal. While this happens automatically every heal-timeout seconds, we can also manually trigger it via the CLI using gluster volume heal $VOLNAME.
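For example (the volume name is illustrative and the exact confirmation message differs between versions):

# gluster volume heal testvol
Launching heal operation to perform index self heal on volume testvol has been successful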

+

Full heal:

+

A full heal, triggered from the CLI with gluster volume heal $VOLNAME full, does just what the name implies. It does not process a particular list of entries like index heal, but crawls the whole gluster filesystem beginning with root, examines if files have non-zero afr xattrs and triggers heal on them.

+

Of missing xattrs and split-brains:

+

You might now realise how AFR pretty much relies on the xattr values of a given file - from using them to find the good copies to serve a read, to finding out the source and sink bricks to heal the file. But what if there is an inconsistency in the data/metadata of a file and

+

(a) there are zero/ no AFR xattrs (or)

+

(b) if the xattrs all blame each other (i.e. no good copy=>split-brain)?

+

For (a), AFR uses heuristics like picking a brick that is local to that specific glustershd process, picking the bigger file, picking the file with the latest ctime, etc., and then does the heal.

+

For (b) you need to resort to using the gluster split-brain resolution CLI or setting the favorite-child-policy volume option to choose a good copy and trigger the heal.
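A minimal sketch of both approaches, using the hypothetical testvol volume and FILE1 from earlier (the policy and file are illustrative; see the split-brain resolution guide for all supported policies):

# resolve an existing split-brain by picking the copy with the latest mtime
gluster volume heal testvol split-brain latest-mtime /FILE1

# or let AFR resolve future split-brains automatically based on a policy
gluster volume set testvol cluster.favorite-child-policy mtime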

diff --git a/Administrator-Guide/Bareos/index.html b/Administrator-Guide/Bareos/index.html

Configuring Bareos to store backups on Gluster

+

This description assumes that you already have a Gluster environment ready and +configured. The examples use storage.example.org as a Round Robin DNS name +that can be used to contact any of the available GlusterD processes. The +Gluster Volume that is used, is called backups. Client systems would be able +to access the volume by mounting it with FUSE like this:

+
mount -t glusterfs storage.example.org:/backups /mnt
+
+

Bareos contains a plugin for the Storage Daemon that uses +libgfapi. This makes it possible for Bareos to access the Gluster Volumes +without the need to have a FUSE mount available.

+

Here we will use one server that is dedicated for doing backups. This system is +called backup.example.org. The Bareos Director is running on this host, +together with the Bareos Storage Daemon. In the example, there is a File Daemon +running on the same server. This makes it possible to backup the Bareos +Director, which is useful as a backup of the Bareos database and configuration +is kept that way.

+

Bareos Installation

+

An absolute minimal Bareos installation needs a Bareos Director and a Storage +Daemon. In order to backup a filesystem, a File Daemon needs to be available +too. For the description in this document, CentOS-7 was used, with the +following packages and versions:

+ +

The Gluster Storage Servers do not need to have any Bareos packages installed. +It is often better to keep applications (Bareos) and storage servers on +different systems. So, when the Bareos repository has been configured, install +the packages on the backup.example.org server:

+
yum install bareos-director bareos-database-sqlite3 \
+                bareos-storage-glusterfs bareos-filedaemon \
+                bareos-bconsole
+
+

To keep things as simple as possible, SQLite is used. For production +deployments either MySQL or PostgreSQL is advised. The initial database needs to be +created:

+
sqlite3 /var/lib/bareos/bareos.db < /usr/lib/bareos/scripts/ddl/creates/sqlite3.sql
+chown bareos:bareos /var/lib/bareos/bareos.db
+
+

The bareos-bconsole package is optional. bconsole is a terminal application +that can be used to initiate backups, check the status of different Bareos +components and the like. Testing the configuration with bconsole is +relatively simple.

+

Once the packages are installed, you will need to start and enable the daemons:

+
systemctl start bareos-sd
+systemctl start bareos-fd
+systemctl start bareos-dir
+systemctl enable bareos-sd
+systemctl enable bareos-fd
+systemctl enable bareos-dir
+
+

Gluster Volume preparation

+

There are a few steps needed to allow Bareos to access the Gluster Volume. By +default Gluster does not allow clients to connect from an unprivileged port. +Because the Bareos Storage Daemon does not run as root, permissions to connect +need to be opened up.

+

There are two processes involved when a client accesses a Gluster Volume. For +the initial phase, GlusterD is contacted; once the client has received the layout +of the volume, the client will connect to the bricks directly. The changes to +allow unprivileged processes to connect are therefore twofold:

+
    +
  1. In /etc/glusterfs/glusterd.vol the option rpc-auth-allow-insecure on + needs to be added on all storage servers. After the modification of the + configuration file, the GlusterD process needs to be restarted with + systemctl restart glusterd.
  2. +
  3. The brick processes for the volume are configured through a volume option. + By executing gluster volume set backups server.allow-insecure on the + needed option gets set. Some versions of Gluster require a volume stop/start + before the option is taken into account, for these versions you will need to + execute gluster volume stop backups and gluster volume start backups.
  4. +
+

Except for the network permissions, the Bareos Storage Daemon needs to be +allowed to write to the filesystem provided by the Gluster Volume. This is +achieved by setting normal UNIX permissions/ownership so that the right +user/group can write to the volume:

+
mount -t glusterfs storage.example.org:/backups /mnt
+mkdir /mnt/bareos
+chown bareos:bareos /mnt/bareos
+chmod ug=rwx /mnt/bareos
+umount /mnt
+
+

Depending on how users/groups are maintained in the environment, the bareos +user and group may not be available on the storage servers. If that is the +case, the chown command above can be adapted to use the uid and gid of +the bareos user and group from backup.example.org. On the Bareos server, +the output would look similar to:

+
# id bareos
+uid=998(bareos) gid=997(bareos) groups=997(bareos),6(disk),30(tape)
+
+

And that makes the chown command look like this:

+
chown 998:997 /mnt/bareos
+
+

Bareos Configuration

+

When bareos-storage-glusterfs got installed, an example configuration file +has been added too. The /etc/bareos/bareos-sd.d/device-gluster.conf contains +the Archive Device directive, which is a URL for the Gluster Volume and path +where the backups should get stored. In our example, the entry should get set +to:

+
Device {
+    Name = GlusterStorage
+    Archive Device = gluster://storage.example.org/backups/bareos
+    Device Type = gfapi
+    Media Type = GlusterFile
+    ...
+}
+
+

The default configuration of the Bareos provided jobs is to write backups to +/var/lib/bareos/storage. In order to write all the backups to the Gluster +Volume instead, the configuration for the Bareos Director needs to be modified. +In the /etc/bareos/bareos-dir.conf configuration, the defaults for all jobs +can be changed to use the GlusterFile storage:

+
JobDefs {
+    Name = "DefaultJob"
+    ...
+  #   Storage = File
+    Storage = GlusterFile
+    ...
+}
+
+

After changing the configuration files, the Bareos daemons need to apply them. +The easiest way to inform the processes of the changed configuration files is by +instructing them to reload their configuration:

+
# bconsole
+Connecting to Director backup:9101
+1000 OK: backup-dir Version: 14.2.2 (12 December 2014)
+Enter a period to cancel a command.
+*reload
+
+

With bconsole it is also possible to check if the configuration has been +applied. The status command can be used to show the URL of the storage that +is configured. When all is setup correctly, the result looks like this:

+
*status storage=GlusterFile
+Connecting to Storage daemon GlusterFile at backup:9103
+...
+open.
+...
+
+

Create your first backup

+

There are several default jobs configured in the Bareos Director. One of them +is the DefaultJob which was modified in an earlier step. This job uses the +SelfTest FileSet, which backs up /usr/sbin. Running this job will verify if +the configuration is working correctly. Additional jobs, other FileSets and +more File Daemons (clients that get backed up) can be added later.

+
*run
+A job name must be specified.
+The defined Job resources are:
+        1: BackupClient1
+        2: BackupCatalog
+        3: RestoreFiles
+Select Job resource (1-3): 1
+Run Backup job
+JobName:  BackupClient1
+Level:    Incremental
+Client:   backup-fd
+...
+OK to run? (yes/mod/no): yes
+Job queued. JobId=1
+
+

The job will need a few seconds to complete, the status command can be used +to show the progress. Once done, the messages command will display the +result:

+
*messages
+...
+    JobId:                  1
+    Job:                    BackupClient1.2015-09-30_21.17.56_12
+    ...
+    Termination:            Backup OK
+
+

The archive that contains the backup will be located on the Gluster Volume. To +check if the file is available, mount the volume on a storage server:

+
mount -t glusterfs storage.example.org:/backups /mnt
+ls /mnt/bareos
+
+

Further Reading

+

This document intends to provide a quick start for configuring Bareos to use +Gluster as a storage backend. Bareos can be configured to create backups of +different clients (which run a File Daemon), run jobs at scheduled times and +intervals, and much more. The excellent Bareos +documentation can be consulted to find out how to +create backups in a much more useful way than can be expressed on this page.

diff --git a/Administrator-Guide/Brick-Naming-Conventions/index.html b/Administrator-Guide/Brick-Naming-Conventions/index.html

Brick Naming Conventions

+

FHS-2.3 isn't entirely clear on where data shared by the server should reside. It does state that "/srv contains site-specific data which is served by this system", but is GlusterFS data site-specific?

+

The consensus seems to lean toward using /data. A good hierarchical method for placing bricks is:

+
/data/glusterfs/<volume>/<brick>/brick
+
+

In this example, <brick> is the filesystem that is mounted.

+

Example: One Brick Per Server

+

A physical disk /dev/sdb is going to be used as brick storage for a volume you're about to create named myvol1. You've partitioned and formatted /dev/sdb1 with XFS on each of 4 servers.

+

On all 4 servers:

+
mkdir -p /data/glusterfs/myvol1/brick1
+mount /dev/sdb1 /data/glusterfs/myvol1/brick1
+
+

We're going to define the actual brick in the brick directory on that filesystem. This helps by causing the brick to fail to start if the XFS filesystem isn't mounted.

+

On just one server:

+
gluster volume create myvol1 replica 2 server{1..4}:/data/glusterfs/myvol1/brick1/brick
+
+

This will create the volume myvol1 which uses the directory /data/glusterfs/myvol1/brick1/brick on all 4 servers.

+

Example: Two Bricks Per Server

+

Two physical disks /dev/sdb and /dev/sdc are going to be used as brick storage for a volume you're about to create named myvol2. You've partitioned and formatted /dev/sdb1 and /dev/sdc1 with XFS on each of 4 servers.

+

On all 4 servers:

+
mkdir -p /data/glusterfs/myvol2/brick{1,2}
+mount /dev/sdb1 /data/glusterfs/myvol2/brick1
+mount /dev/sdc1 /data/glusterfs/myvol2/brick2
+
+

Again we're going to define the actual brick in the brick directory on these filesystems.

+

On just one server:

+
gluster volume create myvol2 replica 2 \
+  server{1..4}:/data/glusterfs/myvol2/brick1/brick \
+  server{1..4}:/data/glusterfs/myvol2/brick2/brick
+
+

Note: It might be tempting to try gluster volume create myvol2 replica 2 server{1..4}:/data/glusterfs/myvol2/brick{1,2}/brick but Bash would expand the last {} first, so you would end up replicating between the two bricks on each server, instead of across servers.
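A quick way to convince yourself is to echo the expansion in a Bash shell:

echo server{1..4}:/data/glusterfs/myvol2/brick{1,2}/brick

The first two paths printed are server1:/data/glusterfs/myvol2/brick1/brick and server1:/data/glusterfs/myvol2/brick2/brick, i.e. a replica pair made of two bricks on the same server, which is exactly the layout the note warns against.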

diff --git a/Administrator-Guide/Building-QEMU-With-gfapi-For-Debian-Based-Systems/index.html b/Administrator-Guide/Building-QEMU-With-gfapi-For-Debian-Based-Systems/index.html

Building QEMU With gfapi For Debian Based Systems

+

This how-to has been tested on Ubuntu 13.10 in a clean, up-to-date +environment. Older Ubuntu distros required some hacks, if I remember +rightly. Other Debian-based distros should be able to follow this, +adjusting for dependencies. Please update this if you get it working on +another distro.

+

Satisfying dependencies

+

Make the first stab at getting qemu dependencies

+
apt-get  build-dep qemu
+
+

This next command grabs all the dependencies specified in the debian +control file as asked for from upstream Debian sid. You can look into the +options specified there and adjust to taste.

+
# get almost all the rest and the tools to work up the Debian magic
+apt-get install devscripts quilt libiscsi-dev libusbredirparser-dev libssh2-1-dev libvdeplug-dev libjpeg-dev glusterfs*
+
+

we need a newer version of libseccomp for Ubuntu 13.10

+
mkdir libseccomp
+cd libseccomp
+
+# grab it from upstream sid
+wget http://ftp.de.debian.org/debian/pool/main/libs/libseccomp/libseccomp_2.1.0+dfsg.orig.tar.gz
+wget http://ftp.de.debian.org/debian/pool/main/libs/libseccomp/libseccomp_2.1.0+dfsg-1.debian.tar.gz
+
+# get it ready
+tar xf libseccomp_2.1.0+dfsg.orig.tar.gz
+cd libseccomp-2.1.0+dfsg/
+
+# install the debian magic
+tar xf ../libseccomp_2.1.0+dfsg-1.debian.tar.gz
+
+# apply series files if any
+while quilt push; do quilt refresh; done
+
+# build debs, they will appear one directory up
+debuild -i -us -uc -b
+cd ..
+
+# install it
+dpkg -i *.deb
+
+

Building QEMU

+

This next part is straightforward if your dependencies are met. For the +advanced reader look around debian/control once it is extracted before +you install as you may want to change what options QEMU is built with +and what targets are requested.

+
cd ..
+mkdir qemu
+cd qemu
+
+# download our sources. you'll want to check back frequently on these for changes
+wget http://ftp.de.debian.org/debian/pool/main/q/qemu/qemu_1.7.0+dfsg.orig.tar.xz
+wget http://ftp.de.debian.org/debian/pool/main/q/qemu/qemu_1.7.0+dfsg-2.debian.tar.gz
+wget http://download.gluster.org/pub/gluster/glusterfs/3.4/LATEST/glusterfs-3.4.2.tar.gz
+tar xf glusterfs-3.4.2.tar.gz
+tar xf qemu_1.7.0+dfsg.orig.tar.xz
+cd qemu-1.7.0+dfsg/
+
+# unpack the debian magic
+tar xf ../qemu_1.7.0+dfsg-2.debian.tar.gz
+
+# bring glusterfs into the build
+cp -r ../glusterfs-3.4.2 glusterfs
+
+# the glusterfs check in configure looks around weird. I've never asked why but moving the src stuff up one works and tests fine
+cd glusterfs/api/
+mv src/* .
+cd ../..
+
+#you'll need to edit debian/control to enable glusterfs replacing
+
+  - ##--enable-glusterfs todo
+  + # --enable-glusterfs
+  + glusterfs-common (>= 3.4.0),
+
+#And finally build. It'll take ages.  http://xkcd.com/303/
+# apply series if any
+while quilt push; do quilt refresh; done
+
+# build packages
+debuild -i -us -uc -b
+cd ..
+
+

Your debs are now available to install. It is up to the reader to determine +what targets they want installed.

diff --git a/Administrator-Guide/Consul/index.html b/Administrator-Guide/Consul/index.html

Consul and GlusterFS integration

+

Consul is used for service discovery and configuration.

+

It consists of consul server and agents connecting to it. +Apps can get configuration data from consul via HTTP API or DNS queries.

+

Long story short, instead of using standard hostnames and relying on official DNS servers which we may not control, +we can use consul to resolve hosts with services under .consul domain, which turns this classic setup:

+
mount -t glusterfs -o backupvolfile-server=gluster-poc-02 gluster-poc-01:/g0 /mnt/gluster/g0
+
+

into more convenient entry:

+
mount -t glusterfs gluster.service.consul:/g0 /mnt/gluster/g0
+
+

which is especially useful when using image-based servers without further provisioning, and spreading load across all healthy servers registered in consul.

+

Warning

+

In this document you will get a proof-of-concept basic setup - gluster servers and gluster clients configured - +which should be a starting point to expand on. You should read the Further steps section to harden it.

+

Tested on:

+
    +
  • isolated virtual network
  • +
  • selinux permissive (yay!)
  • +
  • consul server/agents version v0.7.5
  • +
  • gluster servers with glusterfs 3.8.x on CentOS 7.3 + samba 4 with simple auth and vfs gluster module
  • +
  • gluster volume set as distributed-replicated + 'features.shard: true' and 'features.shard-block-size: 512MB'
  • +
  • gluster agents with glusterfs 3.8.x on Ubuntu 14.04
  • +
  • gluster agents with glusterfs 3.8.x on CentOS 7.3
  • +
  • gluster agents with glusterfs 3.7.x on CentOS 5.9
  • +
  • Windows 2012R2 connected to gluster servers via samba
  • +
+

Scenario

+

We want to create shared storage accessible via different operating systems - Linux and Windows.

+
    +
  • we do not control DNS server so we cannot add/remove entries on gluster server add/remove
  • +
  • gluster servers are in the gluster pool and have gluster volume created named g0
  • +
  • gluster servers have consul agent installed, and they will register to consul as gluster service
  • +
  • gluster servers have also SMB installed with very simple setup using gluster vfs plugin
  • +
  • gluster client have consul agent installed, and they will use gluster.service.consul as entry point.
  • +
  • DNS resolution under Linux will be handled via dnsmasq
  • +
  • DNS resolution under Windows will be handled via consul itself
  • +
+

Known limitations

+
    +
  • consul health checks introduce delay, also remember that consul can cache DNS entries to increase performance
  • +
  • the way the Windows share works is that it connects to one of the samba servers; if this server dies then transfers are + aborted, and we must retry the operation, but watch out for the delay.
  • +
  • anything other than gluster volume distributed-replicated was not tested - it may not work for Windows.
  • +
+

Requirements

+
    +
  • you should have consul server (or cluster) up and running, and the best, also accessible via default HTTP port.
  • +
  • you should have gluster servers already joined in the gluster pool, bricks and volume configured.
  • +
  • check you firewall rules for outbound and inbound for DNS, gluster, samba, consul
  • +
  • make yourself familiar with consul documentation (or specific branch on github)
  • +
+

Linux setup

+

Consul agent on Linux on gluster clients

+

First, install the consul agent. The best way is to use, for example, a puppet module. +In general your Linux boxes should register in the consul server and be visible under the Nodes section.

+

To verify if consul agent is working properly, you can query its DNS interface, asking it to list consul servers:

+

+[centos@gluster-poc-01]# dig consul.service.consul 127.0.0.1:8600
+
+; <<>> DiG 9.9.4-RedHat-9.9.4-38.el7_3.3 <<>> consul.service.consul 127.0.01:8600
+;; global options: +cmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 39354
+;; flags: qr aa rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 0
+
+;; QUESTION SECTION:
+;consul.service.consul.     IN  A
+
+;; ANSWER SECTION:
+consul.service.consul.  0   IN  A   172.30.64.198
+consul.service.consul.  0   IN  A   172.30.82.255
+consul.service.consul.  0   IN  A   172.30.81.155
+
+;; Query time: 1 msec
+;; SERVER: 127.0.0.1#53(127.0.0.1)
+;; WHEN: Sat May 20 08:50:21 UTC 2017
+;; MSG SIZE  rcvd: 87
+
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 22224
+;; flags: qr rd ra ad; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
+
+;; QUESTION SECTION:
+;127.0.0.1:8600.            IN  A
+
+;; Query time: 0 msec
+;; SERVER: 127.0.0.1#53(127.0.0.1)
+;; WHEN: Sat May 20 08:50:21 UTC 2017
+;; MSG SIZE  rcvd: 32
+
+

Now, to be able to use it at the system level, we want it to work without specifying the port. +We can achieve this by running consul on port 53 (not advised), redirecting network traffic from port 53 to 8600, or proxying it via a local DNS resolver - for example a locally installed dnsmasq.

+

First, install dnsmasq, and add file /etc/dnsmasq.d/10-consul:

+
server=/consul/127.0.0.1#8600
+
+

This will ensure that any *.consul requests will be forwarded to local consul listening on its default DNS port 8600.

+

Make sure that /etc/resolv.conf contains nameserver 127.0.0.1. Under Debian distros it should be there, under RedHat - not really. You can fix this in two ways, choose on your own which one to apply:

+
    +
  • add nameserver 127.0.0.1 to /etc/resolvconf/resolv.conf.d/header
  • +
+

or

+
    +
  • update /etc/dhcp/dhclient.conf and add to it line prepend domain-name-servers 127.0.0.1;.
  • +
+

In both cases it ensures that dnsmasq will be a first nameserver, and requires reloading resolver or networking.

+

Eventually you should have nameserver 127.0.0.1 set as first entry in /etc/resolv.conf and have DNS resolving consul entries:

+

+[centos@gluster-poc-01]# dig consul.service.consul
+
+; <<>> DiG 9.9.4-RedHat-9.9.4-38.el7_3.3 <<>> consul.service.consul
+;; global options: +cmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 42571
+;; flags: qr aa rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 0
+
+;; QUESTION SECTION:
+;consul.service.consul.     IN  A
+
+;; ANSWER SECTION:
+consul.service.consul.  0   IN  A   172.30.64.198
+consul.service.consul.  0   IN  A   172.30.82.255
+consul.service.consul.  0   IN  A   172.30.81.155
+
+;; Query time: 1 msec
+;; SERVER: 127.0.0.1#53(127.0.0.1)
+;; WHEN: Sat May 20 09:01:12 UTC 2017
+;; MSG SIZE  rcvd: 87
+
+
+

From now on we should be able to use <servicename>.service.consul in places, where we had FQDN entries of the single servers.

+

Next, we must define gluster service consul on the servers.

+

Consul agent on Linux on gluster servers

+

Install consul agent as described in previous section.

+

You can define the consul service as gluster with health checks; to run them, we must add consul to sudoers or allow it to execute certain sudo commands without a password:

+

/etc/sudoers.d/99-consul.conf:

+
consul ALL=(ALL) NOPASSWD: /sbin/gluster pool list
+
+

First, lets define service in consul, it will be very basic, without volume names. +Service name gluster, with default port 24007, and we will tag it as gluster and server.

+

Our service will have service health checks every 10s:

+
    +
  • check if the gluster service is responding to TCP on 24007 port
  • +
  • check if the gluster server is connected to other peers in the pool (to avoid registering as a healthy service which is actually not serving anything)
  • +
+

Below is an example of /etc/consul/service_gluster.json:

+
{
+  "service": {
+    "address": "",
+    "checks": [
+      {
+        "interval": "10s",
+        "tcp": "localhost:24007",
+        "timeout": "5s"
+      },
+      {
+        "interval": "10s",
+        "script": "/bin/bash -c \"sudo -n /sbin/gluster pool list |grep -v UUID|grep -v localhost|grep Connected\"",
+        "timeout": "5s"
+      }
+    ],
+    "enableTagOverride": false,
+    "id": "gluster",
+    "name": "gluster",
+    "port": 24007,
+    "tags": ["gluster", "server"]
+  }
+}
+
+

Restart consul service and you should see gluster servers in consul web ui. +After a while service should be in healthy stage and be available under nslookup:

+
[centos@gluster-poc-02]# nslookup gluster.service.consul
+Server:     127.0.0.1
+Address:    127.0.0.1#53
+
+Name:   gluster.service.consul
+Address: 172.30.64.144
+Name:   gluster.service.consul
+Address: 172.30.65.61
+
+

Notice that gluster server can be also gluster client, for example if we want to mount gluster volume on the servers.

+

Mounting gluster volume under Linux

+

As a mountpoint we would usually select one of the gluster servers, and another one as a backup server, like this:

+
mount -t glusterfs -o backupvolfile-server=gluster-poc-02 gluster-poc-01:/g0 /mnt/gluster/g0
+
+

This is a bit inconvenient, for example we have an image with hardcoded hostnames, and old servers are gone due to maintenance. +We would have to recreate image, or reconfigure existing nodes if they unmount gluster storage.

+

To mitigate that issue we can now use consul for fetching the server pool:

+
mount -t glusterfs gluster.service.consul:/g0 /mnt/gluster/g0
+
+

So we can populate that to /etc/fstab or one of the autofs files.
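For example, a sketch of an /etc/fstab entry (the mount options are illustrative; _netdev simply delays the mount until the network is up):

gluster.service.consul:/g0  /mnt/gluster/g0  glusterfs  defaults,_netdev  0 0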

+

Windows setup

+

Configuring gluster servers as samba shares

+

This is the simplest and not so secure setup, you have been warned.

+

Proper setup suggests using LDAP or CTDB. +You can configure it with puppet using module kakwa-samba.

+

First, we want to reconfigure the gluster servers so that they serve as samba shares using user/pass credentials, which are separate from the Linux credentials.

+

We assume that accessing windows share will be done as user steve with password steve-loves-bacon, make sure you create that user on each gluster server host.

+
sudo adduser steve
+sudo smbpasswd -a steve
+
+

Notice that if you do not set user.smb = disable in gluster volume then it may auto-add itself to samba configuration. So better disable this by executing:

+
gluster volume set g0 user.smb disable
+
+

Now install samba and samba-vfs-glusterfs packages and configure /etc/samba/smb.conf:

+
[global]
+workgroup = test
+security = user
+min protocol = SMB2
+netbios name = gluster
+realm = test
+vfs objects = acl_xattr
+map acl inherit = Yes
+store dos attributes = Yes
+log level = 1
+dedicated keytab file = /etc/krb5.keytab
+map untrusted to domain = Yes
+
+[vfs-g0]
+guest only = no
+writable = yes
+guest ok = no
+force user = steve
+create mask = 0666
+directory mask = 0777
+comment = Gluster via VFS (native gluster)
+path = /
+vfs objects = glusterfs
+glusterfs:volume = g0
+kernel share modes = no
+glusterfs:loglevel = 7
+glusterfs:logfile = /var/log/samba/glusterfs-g0.%M.log
+browsable = yes
+force group = steve
+
+

Some notes:

+
    +
  • when using vfs plugin then path is a relative path via gluster volume.
  • +
  • kernel share modes = no may be required to make it work.
  • +
+

We can also use classic fuse mount and use it under samba as share path, then configuration is even simpler.

+

For detailed description between those two solutions see gluster vfs blog posts.

+
    +
  • Remember to add user steve to samba with a password
  • +
  • unblock firewall ports for samba
  • +
  • test samba config and reload samba
  • +
+

Defining new samba service under consul

+

Now we define the gluster-samba service on the gluster server hosts in a similar way as we defined it for gluster itself.

+

Below is an example of /etc/consul/service_samba.json:

+
{
+  "service": {
+    "address": "",
+    "checks": [
+      {
+        "interval": "10s",
+        "tcp": "localhost:139",
+        "timeout": "5s"
+      },
+      {
+        "interval": "10s",
+        "tcp": "localhost:445",
+        "timeout": "5s"
+      }
+    ],
+    "enableTagOverride": false,
+    "id": "gluster-samba",
+    "name": "gluster-samba",
+    "port": 139,
+    "tags": ["gluster", "samba"]
+  }
+}
+
+

We have two health checks here, just checking if we can connect to samba service. It could be also expanded to see if the network share is actually accessible.

+

Reload the consul service and after a while you should see the new service registered in consul. +Check if it exists in DNS:

+
nslookup gluster-samba.service.consul
+
+Server:     127.0.0.1
+Address:    127.0.0.1#53
+
+Name:   gluster-samba.service.consul
+Address: 172.30.65.61
+Name:   gluster-samba.service.consul
+Address: 172.30.64.144
+
+

Install samba-client and check connectivity to samba from gluster server itself.

+
[centos@gluster-poc-01]# smbclient -L //gluster-samba.service.consul/g0 -U steve
+Enter steve's password:
+Domain=[test] OS=[Windows 6.1] Server=[Samba 4.4.4]
+
+    Sharename       Type      Comment
+    ---------       ----      -------
+    vfs-g0          Disk      Gluster via VFS (native gluster)
+    IPC$            IPC       IPC Service (Samba 4.4.4)
+Domain=[test] OS=[Windows 6.1] Server=[Samba 4.4.4]
+
+    Server               Comment
+    ---------            -------
+
+    Workgroup            Master
+    ---------            -------
+
+

Now check if we can list share directory as steve:

+
smbclient //gluster-samba.service.consul/vfs-g0/ -U steve -c ls
+
+Enter steve's password:
+Domain=[test] OS=[Windows 6.1] Server=[Samba 4.4.4]
+  .                                   D        0  Wed May 17 20:48:06 2017
+  ..                                  D        0  Wed May 17 20:48:06 2017
+  .trashcan                          DH        0  Mon May 15 15:41:37 2017
+  CentOS-7-x86_64-Everything-1611.iso      N 8280604672  Mon Dec  5 13:57:33 2016
+  hello.world                         D        0  Fri May 19 08:54:02 2017
+  ipconfig.all.txt                    A     2931  Wed May 17 20:18:52 2017
+  nslookup.txt                        A      126  Wed May 17 20:19:13 2017
+  net.view.txt                        A      315  Wed May 17 20:47:44 2017
+
+        463639360 blocks of size 1024. 447352464 blocks available
+
+
+

Notice that this might take a few seconds, because when we try to connect to the share, samba vfs connects to the gluster servers as agent.

+

Looks good, time to configure Windows.

+

Installing Consul agent on Windows

+

Log in as administrator and install consul agent on the Windows machine, the easiest way is to use chocolatey.

+
    +
  • +

    install chocolatey and use preferred installation method, for example via cmd.exe

    +
  • +
  • +

    optionally install some tools via chocolatey to edit files:

    +
  • +
+
chocolatey install notepadplusplus
+
+
    +
  • install consul as agent with specific version and configs to load:
  • +
+
chocolatey install consul --version 0.7.5 -params '-config-dir "%PROGRAMDATA%\consul\"'
+
+
    +
  • stop consul service in command prompt:
  • +
+
net stop consul
+
+
    +
  • edit consul config %PROGRAMDATA%\consul\config.json:
  • +
+
start notepad++.exe "%PROGRAMDATA%\consul\config\config.json"
+
+

fill it with data (description below):

+
{
+  "datacenter": "virt-gluster",
+  "retry_join": ["192.178.1.11", "192.178.1.12", "192.178.1.13"],
+  "recursors": ["8.8.8.8", "8.8.4.4"],
+  "ports": {
+    "dns": 53
+  }
+}
+
+

Remember to replace datacenter, recursors with preferred local DNS servers and retry_join with list of consul server hosts or for example some generic Route53 entry from private zone (if it exists) which points to real consul servers.

+

In AWS you can also use retry_join_ec2 - this way the Windows instance will always search for other consul server EC2 instances and join them.

+

Notice that the recursors section is required if not using retry_join and just relying on AWS EC2 tags - otherwise consul will fail to resolve anything else, thus not joining the consul cluster.

+

We use port 53 so that consul will serve as local DNS.

+
    +
  • start consul service
  • +
+
net start consul
+
+
    +
  • update DNS settings for network interface in Windows, make it the primary entry
  • +
+
netsh interface ipv4 add dnsserver \"Ethernet\" address=127.0.0.1 index=1
+
+
    +
  • verify that DNS Servers is pointing to localhost:
  • +
+
ipconfig /all
+
+Windows IP Configuration
+
+    Host Name . . . . . . . . . . . . : WIN-S8N782O8GG3
+    ...
+    ...
+    DNS Servers . . . . . . . . . . . : 127.0.0.1
+    ...
+    ...
+
+
    +
  • verify that consul resolves some services:
  • +
+
nslookup gluster.service.consul
+
+nslookup gluster-samba.service.consul
+
+Server:  UnKnown
+Address:  127.0.0.1
+
+Name:    gluster-samba.service.consul
+Addresses:  172.30.65.61
+            172.30.64.144
+
+

Mounting gluster volume under Windows

+

We have running gluster servers with volume and samba share, registered in consul. +We have Windows with running consul agent. +All hosts are registered in consul and can connect to each other.

+
    +
  • verify that samba can see network share:
  • +
+
net view \\gluster-samba.service.consul
+
+Shared resources at \\gluster-samba.service.consul
+
+Samba 4.4.4
+
+Share name  Type  Used as  Comment
+
+-------------------------------------------------------------------------------
+vfs-g0      Disk           Gluster via VFS (native gluster)
+The command completed successfully.
+
+
    +
  • mount network share, providing credentials for gluster samba share:
  • +
+
net use s: \\gluster-samba.service.consul\vfs-g0 /user:steve password: steve-loves-bacon /persistent:yes
+
+

If mounting fails due to error message: +System error 1219 has occurred. Multiple connections to a server or shared resource by the same user, using more than one user name, are not allowed.... +then you must delete existing connections, for example:

+
net use /delete \\gluster-samba.service.consul\IPC$
+
+

And then retry the net use commands again.

+

From now on this windows share should reconnect to the random gluster samba server, if it is healthy.

+

Enjoy.

+

Further steps for improvements

+

Below is a list of things to improve:

+
    +
  • enable selinux
  • +
  • harden samba setup on gluster servers to use domain logons
  • +
  • +

    use consul ACL lists to control access to consul data

    +
  • +
  • +

    export gluster volumes as key/value in consul, use consul-template to create mountpoints on consul updates - in autofs/ samba mounts/unmounts

    +
  • +
  • expand consul health checks with more detailed checks, like:
  • +
  • better checking if gluster volume exists etc
  • +
  • if samba share is accessible by the client (to avoid situation samba tries to share non-mounted volume)
  • +
diff --git a/Administrator-Guide/Directory-Quota/index.html b/Administrator-Guide/Directory-Quota/index.html

Managing Directory Quota

+

Directory quotas in GlusterFS allow you to set limits on the usage of the disk +space by directories or volumes. The storage administrators can control +the disk space utilization at the directory and/or volume levels in +GlusterFS by setting limits to allocatable disk space at any level in +the volume and directory hierarchy. This is particularly useful in cloud +deployments to facilitate the utility billing model.

+
+

Note: For now, only Hard limits are supported. Here, the limit cannot be +exceeded, and attempts to use more disk space or inodes beyond the set +limit are denied.

+
+

System administrators can also monitor the resource utilization to limit +the storage for the users depending on their role in the organization.

+

You can set the quota at the following levels:

+
    +
  • Directory level – limits the usage at the directory level
  • +
  • Volume level – limits the usage at the volume level
  • +
+
+

Note: You can set the quota limit on an empty directory. The quota limit will be automatically enforced when files are added to the directory.

+
+

Enabling Quota

+

You must enable Quota to set disk limits.

+

To enable quota:

+

Use the following command to enable quota:

+
gluster volume quota <VOLNAME> enable
+
+

For example, to enable quota on the test-volume:

+
# gluster volume quota test-volume enable
+Quota is enabled on /test-volume
+
+

Disabling Quota

+

You can disable Quota if needed.

+

To disable quota:

+

Use the following command to disable quota:

+
gluster volume quota <VOLNAME> disable
+
+

For example, to disable quota translator on the test-volume:

+
# gluster volume quota test-volume disable
+Quota translator is disabled on /test-volume
+
+

Setting or Replacing Disk Limit

+

You can create new directories in your storage environment and set the +disk limit or set disk limit for the existing directories. The directory +name should be relative to the volume with the export directory/mount +being treated as "/".

+

To set or replace disk limit:

+

Set the disk limit using the following command:

+
gluster volume quota <VOLNAME> limit-usage <DIR> <HARD_LIMIT>
+
+

For example, to set a limit on data directory on the test-volume where +data is a directory under the export directory:

+
# gluster volume quota test-volume limit-usage /data 10GB
+Usage limit has been set on /data
+
+
+

Note +In a multi-level directory hierarchy, the strictest disk limit +will be considered for enforcement. Also, whenever the quota limit +is set for the first time, an auxiliary mount point will be +created under /var/run/gluster/. This is just like any +other mount point with some special permissions and remains until +the quota is disabled. This mount point is being used by quota to set +and display limits and lists respectively.

+
+

Displaying Disk Limit Information

+

You can display disk limit information on all the directories on which +the limit is set.

+

To display disk limit information:

+
    +
  • +

    Display disk limit information of all the directories on which limit + is set, using the following command:

    +

    gluster volume quota <VOLNAME> list

    +
  • +
+

For example, to see the set disks limit on the test-volume:

+
  # gluster volume quota test-volume list
+  /Test/data    10 GB       6 GB
+  /Test/data1   10 GB       4 GB
+
+
    +
  • +

    Display disk limit information on a particular directory on which + limit is set, using the following command:

    +

    gluster volume quota <VOLNAME> list <DIR>

    +
  • +
+

For example, to view the set limit on /data directory of test-volume:

+
  # gluster volume quota test-volume list /data
+  /Test/data    10 GB       6 GB
+
+

Displaying Quota Limit Information Using the df Utility

+

You can create a report of the disk usage using the df utility by considering quota limits. To generate a report, run the following command:

+
gluster volume set <VOLNAME> quota-deem-statfs on
+
+

In this case, the total disk space of the directory is taken as the quota hard limit set on the directory of the volume.

+
+

Note +The default value for quota-deem-statfs is on when the quota is enabled and it is recommended to keep quota-deem-statfs on.

+
+

The following example displays the disk usage when quota-deem-statfs is off:

+
# gluster volume set test-volume features.quota-deem-statfs off
+volume set: success
+
+# gluster volume quota test-volume list
+Path            Hard-limit    Soft-limit    Used      Available
+---------------------------------------------------------------
+/               300.0GB        90%          11.5GB    288.5GB
+/John/Downloads  77.0GB        75%          11.5GB     65.5GB
+
+

Disk usage for volume test-volume as seen on client1:

+
# df -hT /home
+Filesystem           Type            Size  Used Avail Use% Mounted on
+server1:/test-volume fuse.glusterfs  400G   12G  389G   3% /home
+
+

The following example displays the disk usage when quota-deem-statfs is on:

+
# gluster volume set test-volume features.quota-deem-statfs on
+volume set: success
+
+# gluster vol quota test-volume list
+Path        Hard-limit    Soft-limit     Used     Available
+-----------------------------------------------------------
+/              300.0GB        90%        11.5GB     288.5GB
+/John/Downloads 77.0GB        75%        11.5GB     65.5GB
+
+

Disk usage for volume test-volume as seen on client1:

+
# df -hT /home
+Filesystem            Type            Size  Used Avail Use% Mounted on
+server1:/test-volume  fuse.glusterfs  300G   12G  289G   4% /home
+
+

The quota-deem-statfs option when set to on, allows the administrator to make the user view the total disk space available on the directory as the hard limit set on it.

+

Updating Memory Cache Size

+

Setting Timeout

+

For performance reasons, quota caches the directory sizes on the client. You +can set a timeout indicating the maximum valid duration of directory sizes +in the cache, from the time they are populated.

+

For example: If multiple clients are writing to a single +directory, there are chances that some other client might write till the +quota limit is exceeded. However, this new file-size may not get +reflected in the client till the size entry in the cache has become stale +because of timeout. If writes happen on this client during this +duration, they are allowed even though they would lead to exceeding of +quota-limits, since the size in the cache is not in sync with the actual size. +When a timeout happens, the size in the cache is updated from servers and will +be in sync and no further writes will be allowed. A timeout of zero will +force fetching of directory sizes from the server for every operation that +modifies file data and will effectively disable directory size caching +on the client-side.

+

To update the memory cache size:

+

Use the following command to update the memory cache size:

+
    +
  1. Soft Timeout: The frequency at which the quota server-side translator checks the volume usage when the usage is below the soft limit. The soft timeout is in effect when the disk usage is less than the soft limit.
  2. +
+
gluster volume set <VOLNAME> features.soft-timeout <time>
+
+
    +
  1. Hard Timeout: The frequency at which the quota server-side translator checks the volume usage when the usage is above the soft limit. The hard timeout is in effect when the disk usage is between the soft limit and the hard limit.
  2. +
+
gluster volume set <VOLNAME> features.hard-timeout <time>
+
+

For example, to update the memory cache size for every 5 seconds on test-volume in case of hard-timeout:

+
# gluster volume set test-volume features.hard-timeout 5
+Set volume successful
+
+
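Similarly, an illustrative example for the soft timeout, to check the volume usage every 30 seconds while it is still below the soft limit:

# gluster volume set test-volume features.soft-timeout 30
Set volume successful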

Setting Alert Time

+

Alert time is the frequency at which you want your usage information to be logged after you reach the soft limit.

+

To set the alert time:

+

Use the following command to set the alert time:

+
gluster volume quota <VOLNAME> alert-time <time>
+
+
+

Note: +The default alert-time is one week.

+
+

For example, to set the alert time to one day:

+
# gluster volume quota test-volume alert-time 1d
+volume quota : success
+
+

Removing Disk Limit

+

You can remove the set disk limit if you do not want a quota anymore.

+

To remove disk limit:

+

Use the following command to remove the disk limit set on a particular directory:

+
gluster volume quota <VOLNAME> remove <DIR>
+
+

For example, to remove the disk limit on /data directory of +test-volume:

+
# gluster volume quota test-volume remove /data
+Usage limit set on /data is removed
+
diff --git a/Administrator-Guide/Events-APIs/index.html b/Administrator-Guide/Events-APIs/index.html

Events APIs

+

New in version 3.9

+

NOTE : glusterfs-selinux package would have to be installed for events +feature to function properly when the selinux is in enforced mode. In +addition to that, the default port to be used for eventsd has now been +changed to 55555 and it has to lie between the ephemeral port ranges.

+

Set PYTHONPATH(Only in case of Source installation)

+

If Gluster is installed using source install, cliutils will get +installed under /usr/local/lib/python2.7/site-packages. Set +PYTHONPATH by adding the following in ~/.bashrc:

+
export PYTHONPATH=/usr/local/lib/python2.7/site-packages:$PYTHONPATH
+
+

Enable and Start Events APIs

+

Enable and Start glustereventsd in all peer nodes

+

In Systems using Systemd,

+
systemctl enable glustereventsd
+systemctl start glustereventsd
+
+

On FreeBSD or others, add the following in /etc/rc.conf:

+
glustereventsd_enable="YES"
+
+

And start the glustereventsd using,

+
service glustereventsd start
+
+

SysVInit(CentOS 6),

+
chkconfig glustereventsd on
+service glustereventsd start
+
+

Status

+

Status Can be checked using,

+
gluster-eventsapi status
+
+

Example output:

+
Webhooks:
+None
+
++-----------+-------------+-----------------------+
+| NODE      | NODE STATUS | GLUSTEREVENTSD STATUS |
++-----------+-------------+-----------------------+
+| localhost |          UP |                    UP |
+| node2     |          UP |                    UP |
++-----------+-------------+-----------------------+
+
+

Webhooks

+

Webhooks are similar to callbacks (over HTTP); on an event, Gluster will +call the configured Webhook URL (via POST). A Webhook is a web +server which listens on a URL; this can be deployed outside of the +Cluster. Gluster nodes should be able to access this Webhook server on +the configured port.

+

Example Webhook written in python,

+
from flask import Flask, request
+
+app = Flask(__name__)
+
+@app.route("/listen", methods=["POST"])
+def events_listener():
+    gluster_event = request.json
+    if gluster_event is None:
+        # No event to process, may be test call
+        return "OK"
+
+    # Process gluster_event
+    # {
+    #  "nodeid": NODEID,
+    #  "ts": EVENT_TIMESTAMP,
+    #  "event": EVENT_TYPE,
+    #  "message": EVENT_DATA
+    # }
+    print (gluster_event)
+    return "OK"
+
+app.run(host="0.0.0.0", port=9000)
+
+
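To try such a listener locally before registering it, one can hand-post a sample payload in the event format described later on this page (the values are taken from the example below; the official end-to-end check is gluster-eventsapi webhook-test, described next):

curl -X POST -H "Content-Type: application/json" \
     -d '{"nodeid": "95cd599c-5d87-43c1-8fba-b12821fd41b6", "ts": 1468303352, "event": "VOLUME_CREATE", "message": {"name": "gv1"}}' \
     http://192.168.122.188:9000/listen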

Test and Register webhook using following commands,

+
usage: gluster-eventsapi webhook-test [-h] [--bearer_token BEARER_TOKEN] url
+
+positional arguments:
+  url                   URL of Webhook
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --bearer_token BEARER_TOKEN, -t BEARER_TOKEN
+                        Bearer Token
+
+

Example(Webhook server is running in 192.168.122.188:9000),

+
# gluster-eventsapi webhook-test http://192.168.122.188:9000/listen
+
++-----------+-------------+----------------+
+| NODE      | NODE STATUS | WEBHOOK STATUS |
++-----------+-------------+----------------+
+| localhost |          UP |             OK |
+| node2     |          UP |             OK |
++-----------+-------------+----------------+
+
+

If Webhook status is OK from all peer nodes then register the Webhook +using,

+
usage: gluster-eventsapi webhook-add [-h] [--bearer_token BEARER_TOKEN] url
+
+positional arguments:
+  url                   URL of Webhook
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --bearer_token BEARER_TOKEN, -t BEARER_TOKEN
+                        Bearer Token
+
+

Example,

+
# gluster-eventsapi webhook-add http://192.168.122.188:9000/listen
+
++-----------+-------------+-------------+
+| NODE      | NODE STATUS | SYNC STATUS |
++-----------+-------------+-------------+
+| localhost |          UP |          OK |
+| node2     |          UP |          OK |
++-----------+-------------+-------------+
+
+

Note: If Sync status is Not OK for any node, then make sure to run +following command from a peer node when that node comes up.

+
gluster-eventsapi sync
+
+

To unsubscribe from events, delete the webhook using the following command,

+
usage: gluster-eventsapi webhook-del [-h] url
+
+positional arguments:
+  url         URL of Webhook
+
+optional arguments:
+  -h, --help  show this help message and exit
+
+

Example,

+
gluster-eventsapi webhook-del http://192.168.122.188:9000/listen
+
+

Configuration

+

View all configurations using,

+
usage: gluster-eventsapi config-get [-h] [--name NAME]
+
+optional arguments:
+  -h, --help   show this help message and exit
+  --name NAME  Config Name
+
+

Example output:

+
+--------------------+-------+
+| NAME               | VALUE |
++--------------------+-------+
+| log-level          | INFO  |
+| port               | 55555 |
+| disable-events-log | False |
++--------------------+-------+
+
+

To change any configuration,

+
usage: gluster-eventsapi config-set [-h] name value
+
+positional arguments:
+  name        Config Name
+  value       Config Value
+
+optional arguments:
+  -h, --help  show this help message and exit
+
+
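As an illustration, raising the log verbosity would look something like this; the option name comes from the config-get output above, while the exact value shown here is only an example:

```
gluster-eventsapi config-set log-level DEBUG
```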

Example output,

+
+-----------+-------------+-------------+
+| NODE      | NODE STATUS | SYNC STATUS |
++-----------+-------------+-------------+
+| localhost |          UP |          OK |
+| node2     |          UP |          OK |
++-----------+-------------+-------------+
+
+

To Reset any configuration,

+
usage: gluster-eventsapi config-reset [-h] name
+
+positional arguments:
+  name        Config Name or all
+
+optional arguments:
+  -h, --help  show this help message and exit
+
+

Example output,

+
+-----------+-------------+-------------+
+| NODE      | NODE STATUS | SYNC STATUS |
++-----------+-------------+-------------+
+| localhost |          UP |          OK |
+| node2     |          UP |          OK |
++-----------+-------------+-------------+
+
+

Note: If any node status is not UP or sync status is not OK, make sure to run gluster-eventsapi sync from a peer node.

+

Add node to the Cluster

+

When a new node is added to the cluster,

- Enable and start glustereventsd in the new node using the steps mentioned above
- Run the gluster-eventsapi sync command from a peer node other than the new node (see the example below)
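As an illustration, on a systemd-based distribution the two steps for a newly added node map to commands like these (run the first block on the new node, the last command on any existing peer):

```
# On the newly added node
systemctl enable glustereventsd
systemctl start glustereventsd

# On any existing peer node
gluster-eventsapi sync
```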

APIs documentation

+

glustereventsd pushes the Events in JSON format to the configured Webhooks. All Events will have the following attributes.

| Attribute | Description |
| --- | --- |
| nodeid | Node UUID |
| ts | Event Timestamp |
| event | Event Type |
| message | Event Specific Data |

Example:

+
{
+  "nodeid": "95cd599c-5d87-43c1-8fba-b12821fd41b6",
+  "ts": 1468303352,
+  "event": "VOLUME_CREATE",
+  "message": {
+    "name": "gv1"
+  }
+}
+
+

"message" can have following attributes based on the type of event.

+

Peer Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| PEER_ATTACH | host | Hostname or IP of added node |
| PEER_DETACH | host | Hostname or IP of detached node |

Volume Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| VOLUME_CREATE | name | Volume Name |
| VOLUME_START | force | Force option used or not during Start |
| | name | Volume Name |
| VOLUME_STOP | force | Force option used or not during Stop |
| | name | Volume Name |
| VOLUME_DELETE | name | Volume Name |
| VOLUME_SET | name | Volume Name |
| | options | List of Options[(key1, val1), (key2, val2),..] |
| VOLUME_RESET | name | Volume Name |
| | option | Option Name |

Brick Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| BRICK_RESET_START | volume | Volume Name |
| | source-brick | Source Brick details |
| BRICK_RESET_COMMIT | volume | Volume Name |
| | destination-brick | Destination Brick |
| | source-brick | Source Brick details |
| BRICK_REPLACE | volume | Volume Name |
| | destination-brick | Destination Brick |
| | source-brick | Source Brick details |

Georep Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| GEOREP_CREATE | force | Force option used during session Create |
| | secondary | Secondary Details (Secondaryhost::SecondaryVolume) |
| | no_verify | No verify option is used or not |
| | push_pem | Push pem option is used or not |
| | ssh_port | If SSH port is configured during Session Create |
| | primary | Primary Volume Name |
| GEOREP_START | force | Force option used during session Start |
| | primary | Primary Volume Name |
| | secondary | Secondary Details (Secondaryhost::SecondaryVolume) |
| GEOREP_STOP | force | Force option used during session Stop |
| | primary | Primary Volume Name |
| | secondary | Secondary Details (Secondaryhost::SecondaryVolume) |
| GEOREP_PAUSE | force | Force option used during session Pause |
| | primary | Primary Volume Name |
| | secondary | Secondary Details (Secondaryhost::SecondaryVolume) |
| GEOREP_RESUME | force | Force option used during session Resume |
| | primary | Primary Volume Name |
| | secondary | Secondary Details (Secondaryhost::SecondaryVolume) |
| GEOREP_DELETE | force | Force option used during session Delete |
| | primary | Primary Volume Name |
| | secondary | Secondary Details (Secondaryhost::SecondaryVolume) |
| GEOREP_CONFIG_SET | primary | Primary Volume Name |
| | secondary | Secondary Details (Secondaryhost::SecondaryVolume) |
| | option | Name of Geo-rep config |
| | value | Changed Value |
| GEOREP_CONFIG_RESET | primary | Primary Volume Name |
| | secondary | Secondary Details (Secondaryhost::SecondaryVolume) |
| | option | Name of Geo-rep config |

Bitrot Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| BITROT_ENABLE | name | Volume Name |
| BITROT_DISABLE | name | Volume Name |
| BITROT_SCRUB_THROTTLE | name | Volume Name |
| | value | Changed Value |
| BITROT_SCRUB_FREQ | name | Volume Name |
| | value | Changed Value |
| BITROT_SCRUB_OPTION | name | Volume Name |
| | value | Changed Value |

Quota Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| QUOTA_ENABLE | volume | Volume Name |
| QUOTA_DISABLE | volume | Volume Name |
| QUOTA_SET_USAGE_LIMIT | volume | Volume Name |
| | path | Path in Volume on which Quota option is set |
| | limit | Changed Value |
| QUOTA_SET_OBJECTS_LIMIT | volume | Volume Name |
| | path | Path in Volume on which Quota option is set |
| | limit | Changed Value |
| QUOTA_REMOVE_USAGE_LIMIT | volume | Volume Name |
| | path | Path in Volume on which Quota option is Reset |
| QUOTA_REMOVE_OBJECTS_LIMIT | volume | Volume Name |
| | path | Path in Volume on which Quota option is Reset |
| QUOTA_ALERT_TIME | volume | Volume Name |
| | time | Changed Alert Time |
| QUOTA_SOFT_TIMEOUT | volume | Volume Name |
| | soft-timeout | Changed Value |
| QUOTA_HARD_TIMEOUT | volume | Volume Name |
| | hard-timeout | Changed Value |
| QUOTA_DEFAULT_SOFT_LIMIT | volume | Volume Name |
| | default-soft-limit | Changed Value |

Snapshot Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| SNAPSHOT_CREATED | snapshot_name | Snapshot Name |
| | volume_name | Volume Name |
| | snapshot_uuid | Snapshot UUID |
| SNAPSHOT_CREATE_FAILED | snapshot_name | Snapshot Name |
| | volume_name | Volume Name |
| | error | Failure details |
| SNAPSHOT_ACTIVATED | snapshot_name | Snapshot Name |
| | snapshot_uuid | Snapshot UUID |
| SNAPSHOT_ACTIVATE_FAILED | snapshot_name | Snapshot Name |
| | error | Failure details |
| SNAPSHOT_DEACTIVATED | snapshot_name | Snapshot Name |
| | snapshot_uuid | Snapshot UUID |
| SNAPSHOT_DEACTIVATE_FAILED | snapshot_name | Snapshot Name |
| | error | Failure details |
| SNAPSHOT_SOFT_LIMIT_REACHED | volume_name | Volume Name |
| | volume_id | Volume ID |
| SNAPSHOT_HARD_LIMIT_REACHED | volume_name | Volume Name |
| | volume_id | Volume ID |
| SNAPSHOT_RESTORED | snapshot_name | Snapshot Name |
| | volume_name | Volume Name |
| | snapshot_uuid | Snapshot UUID |
| SNAPSHOT_RESTORE_FAILED | snapshot_name | Snapshot Name |
| | error | Failure details |
| SNAPSHOT_DELETED | snapshot_name | Snapshot Name |
| | snapshot_uuid | Snapshot UUID |
| SNAPSHOT_DELETE_FAILED | snapshot_name | Snapshot Name |
| | error | Failure details |
| SNAPSHOT_CLONED | clone_uuid | Snapshot Clone UUID |
| | snapshot_name | Snapshot Name |
| | clone_name | Snapshot Clone Name |
| SNAPSHOT_CLONE_FAILED | snapshot_name | Snapshot Name |
| | clone_name | Snapshot Clone Name |
| | error | Failure details |
| SNAPSHOT_CONFIG_UPDATED | auto-delete | Auto delete Value if available |
| | config_type | Volume Config or System Config |
| | hard_limit | Hard Limit Value if available |
| | soft_limit | Soft Limit Value if available |
| | snap-activate | Snap activate Value if available |
| SNAPSHOT_CONFIG_UPDATE_FAILED | error | Error details |
| SNAPSHOT_SCHEDULER_INITIALISED | status | Success Status |
| SNAPSHOT_SCHEDULER_INIT_FAILED | error | Error details |
| SNAPSHOT_SCHEDULER_ENABLED | status | Success Status |
| SNAPSHOT_SCHEDULER_ENABLE_FAILED | error | Error details |
| SNAPSHOT_SCHEDULER_DISABLED | status | Success Status |
| SNAPSHOT_SCHEDULER_DISABLE_FAILED | error | Error details |
| SNAPSHOT_SCHEDULER_SCHEDULE_ADDED | status | Success Status |
| SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED | error | Error details |
| SNAPSHOT_SCHEDULER_SCHEDULE_EDITED | status | Success Status |
| SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED | error | Error details |
| SNAPSHOT_SCHEDULER_SCHEDULE_DELETED | status | Success Status |
| SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED | error | Error details |

Svc Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| SVC_MANAGER_FAILED | volume | Volume Name if available |
| | svc_name | Service Name |
| SVC_CONNECTED | volume | Volume Name if available |
| | svc_name | Service Name |
| SVC_DISCONNECTED | svc_name | Service Name |

Peer Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| PEER_STORE_FAILURE | peer | Hostname or IP |
| PEER_RPC_CREATE_FAILED | peer | Hostname or IP |
| PEER_REJECT | peer | Hostname or IP |
| PEER_CONNECT | host | Hostname or IP |
| | uuid | Host UUID |
| PEER_DISCONNECT | host | Hostname or IP |
| | uuid | Host UUID |
| | state | Disconnect State |
| PEER_NOT_FOUND | peer | Hostname or IP |
| | uuid | Host UUID |

Unknown Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| UNKNOWN_PEER | peer | Hostname or IP |

Brick Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| BRICK_START_FAILED | peer | Hostname or IP |
| | volume | Volume Name |
| | brick | Brick |
| BRICK_STOP_FAILED | peer | Hostname or IP |
| | volume | Volume Name |
| | brick | Brick |
| BRICK_DISCONNECTED | peer | Hostname or IP |
| | volume | Volume Name |
| | brick | Brick |
| BRICK_CONNECTED | peer | Hostname or IP |
| | volume | Volume Name |
| | brick | Brick |

Bricks Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| BRICKS_START_FAILED | volume | Volume Name |

Brickpath Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| BRICKPATH_RESOLVE_FAILED | peer | Hostname or IP |
| | volume | Volume Name |
| | brick | Brick |

Notify Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| NOTIFY_UNKNOWN_OP | op | Operation Name |

Quorum Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| QUORUM_LOST | volume | Volume Name |
| QUORUM_REGAINED | volume | Volume Name |

Rebalance Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| REBALANCE_START_FAILED | volume | Volume Name |
| REBALANCE_STATUS_UPDATE_FAILED | volume | Volume Name |

Import Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| IMPORT_QUOTA_CONF_FAILED | volume | Volume Name |
| IMPORT_VOLUME_FAILED | volume | Volume Name |
| IMPORT_BRICK_FAILED | peer | Hostname or IP |
| | brick | Brick details |

Compare Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| COMPARE_FRIEND_VOLUME_FAILED | volume | Volume Name |

Ec Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| EC_MIN_BRICKS_NOT_UP | subvol | Subvolume |
| EC_MIN_BRICKS_UP | subvol | Subvolume |

Georep Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| GEOREP_FAULTY | primary_node | Hostname or IP of Primary Volume |
| | brick_path | Brick Path |
| | secondary_host | Secondary Hostname or IP |
| | primary_volume | Primary Volume Name |
| | current_secondary_host | Current Secondary Host to which Geo-rep worker was trying to connect to |
| | secondary_volume | Secondary Volume Name |

Quota Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| QUOTA_CROSSED_SOFT_LIMIT | usage | Usage |
| | volume | Volume Name |
| | path | Path |

Bitrot Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| BITROT_BAD_FILE | gfid | GFID of File |
| | path | Path if Available |
| | brick | Brick details |

Client Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| CLIENT_CONNECT | client_identifier | Client Identifier |
| | client_uid | Client UID |
| | server_identifier | Server Identifier |
| | brick_path | Path of Brick |
| CLIENT_AUTH_REJECT | client_identifier | Client Identifier |
| | client_uid | Client UID |
| | server_identifier | Server Identifier |
| | brick_path | Path of Brick |
| CLIENT_DISCONNECT | client_identifier | Client Identifier |
| | client_uid | Client UID |
| | server_identifier | Server Identifier |
| | brick_path | Path of Brick |

Posix Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| POSIX_SAME_GFID | gfid | GFID of File |
| | path | Path of File |
| | newpath | New Path of File |
| | brick | Brick details |
| POSIX_ALREADY_PART_OF_VOLUME | volume-id | Volume ID |
| | brick | Brick details |
| POSIX_BRICK_NOT_IN_VOLUME | brick | Brick details |
| POSIX_BRICK_VERIFICATION_FAILED | brick | Brick details |
| POSIX_ACL_NOT_SUPPORTED | brick | Brick details |
| POSIX_HEALTH_CHECK_FAILED | path | Path |
| | brick | Brick details |
| | op | Error Number |
| | error | Error |

Afr Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| AFR_QUORUM_MET | subvol | Sub Volume Name |
| AFR_QUORUM_FAIL | subvol | Sub Volume Name |
| AFR_SUBVOL_UP | subvol | Sub Volume Name |
| AFR_SUBVOLS_DOWN | subvol | Sub Volume Name |
| AFR_SPLIT_BRAIN | subvol | Sub Volume Name |

Tier Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| TIER_ATTACH | vol | Volume Name |
| TIER_ATTACH_FORCE | vol | Volume Name |
| TIER_DETACH_START | vol | Volume Name |
| TIER_DETACH_STOP | vol | Volume Name |
| TIER_DETACH_COMMIT | vol | Volume Name |
| TIER_DETACH_FORCE | vol | Volume Name |
| TIER_PAUSE | vol | Volume Name |
| TIER_RESUME | vol | Volume Name |
| TIER_WATERMARK_HI | vol | Volume Name |
| TIER_WATERMARK_DROPPED_TO_MID | vol | Volume Name |
| TIER_WATERMARK_RAISED_TO_MID | vol | Volume Name |
| TIER_WATERMARK_DROPPED_TO_LOW | vol | Volume Name |

Volume Events

| Event Type | Attribute | Description |
| --- | --- | --- |
| VOLUME_ADD_BRICK | volume | Volume Name |
| | bricks | Bricks details separated by Space |
| VOLUME_REMOVE_BRICK_START | volume | Volume Name |
| | bricks | Bricks details separated by Space |
| VOLUME_REMOVE_BRICK_COMMIT | volume | Volume Name |
| | bricks | Bricks details separated by Space |
| VOLUME_REMOVE_BRICK_STOP | volume | Volume Name |
| | bricks | Bricks details separated by Space |
| VOLUME_REMOVE_BRICK_FORCE | volume | Volume Name |
| | bricks | Bricks details separated by Space |
| VOLUME_REBALANCE_START | volume | Volume Name |
| VOLUME_REBALANCE_STOP | volume | Volume Name |
| VOLUME_REBALANCE_FAILED | volume | Volume Name |
| VOLUME_REBALANCE_COMPLETE | volume | Volume Name |

Export and Netgroup Authentication

+ +

Exports and Netgroups Authentication for NFS

+

This feature adds Linux-style exports & netgroups authentication to Gluster's NFS server. More specifically, this feature allows users to restrict access to specific IPs (exports authentication) or to a netgroup (netgroups authentication), or a combination of both, for both Gluster volumes and subdirectories within Gluster volumes. Netgroups are used in Unix environments to control access for NFS exports, remote logins and remote shells. Each netgroup has a unique name and defines a set of hosts, users, groups and other netgroups. This information is stored in files, and the Gluster NFS server manages permissions for clients based on those files.

+

Implications and Usage

+

Currently, Gluster can restrict access to volumes through a simple IP list. This feature makes that capability more scalable by allowing large lists of IPs to be managed through a netgroup. Moreover, it provides more granular permission handling on volumes, such as wildcard support and read-only permission for certain clients.

+

The file /var/lib/glusterd/nfs/export contains the details of machines which can be used as clients for that server. A typical export entry uses the following format:

+
/<export path> <host/netgroup> (options),..
+
+

Here, the export name can be a Gluster volume or a subdirectory path inside that volume. It is followed by a list of hosts/netgroups, and then the options applicable to that entry. A string beginning with '@' is treated as a netgroup and a string without '@' is a host. The options include mount-related parameters; right now options such as "sec", "ro/rw" and "anonuid" are valid ones. If * is mentioned in the host/netgroup field, then any client can mount that export path. A hypothetical example entry is shown below.

+
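As an illustration only — the volume name, netgroup and address below are made up — an entry exporting a volume to one netgroup read-write and one host read-only could look roughly like this; verify the exact syntax with the glusterfsd --print-exports check described later in this page:

```
/gvol-nfs @qa-clients(sec=sys,rw,anonuid=0) 10.1.1.2(sec=sys,ro)
```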

The file /var/lib/glusterd/nfs/netgroup should mention the expansion of each netgroup mentioned in the export file. A typical netgroup entry will look like:

+
<netgroup name> ng1000\nng1000 ng999\nng999 ng1\nng1 ng2\nng2 (ip1,ip2,..)
+
+
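For instance, a minimal netgroup file that expands a netgroup named qa-clients directly into two client machines might look like the following; the name and addresses are invented, and the (host,user,domain) triple syntax follows the usual netgroup conventions:

```
qa-clients (10.1.1.2,,) (10.1.1.3,,)
```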

The Gluster NFS server re-reads the contents of these files at specific time intervals.

+

Volume Options

+
1. Enabling export/netgroup feature

        gluster volume set <volname> nfs.exports-auth-enable on

2. Changing the refresh interval for gluster NFS server

        gluster volume set <volname> nfs.auth-refresh-interval-sec <time in seconds>

3. Changing the cache interval for an export entry

        gluster volume set <volname> nfs.auth-cache-ttl-sec <time in seconds>

Testing the export/netgroup file

+

A user should be able to check the validity of the files before applying the configuration. The "glusterfsd" command now has the following additional arguments that can be used to check the configuration:

+
- --print-netgroups: Validate the netgroups file and print it out. For example,

        glusterfsd --print-netgroups <name of the file>

- --print-exports: Validate the exports file and print it out. For example,

        glusterfsd --print-exports <name of the file>

Points to be noted.

+
1. This feature does not currently support all the options in the man page of exports, but they can easily be added.

2. The files /var/lib/glusterd/nfs/export and /var/lib/glusterd/nfs/netgroup should be created before setting the nfs.exports-auth-enable option, in every node in the Trusted Storage Pool.

3. These files are managed manually by the users. Therefore, their contents can differ among the Gluster NFS servers across the Trusted Storage Pool, i.e. it is possible to have a different authentication mechanism for the Gluster NFS servers in the same cluster.

4. Do not mix up this feature with authentication using nfs.rpc-auth-allow and nfs.export-dir, which may result in inconsistency.

Troubleshooting

+

After changing the contents of the files, if the change is not reflected properly in the authentication mechanism, just restart the server using volume stop and start, so that the Gluster NFS server forcefully re-reads the contents of those files.
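For example, with a placeholder volume name:

```
gluster volume stop <volname>
gluster volume start <volname>
```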


Geo-Replication

+

Introduction

+

Geo-replication provides a continuous, asynchronous, and incremental +replication service from one site to another over Local Area Networks +(LANs), Wide Area Network (WANs), and across the Internet.

+

Prerequisites

+
    +
  • Primary and Secondary Volumes should be Gluster Volumes.
  • +
  • Primary and Secondary clusters should have the same GlusterFS version.
  • +
+

Replicated Volumes vs Geo-replication

+

The following table lists the difference between replicated volumes +and Geo-replication:

| Replicated Volumes | Geo-replication |
| --- | --- |
| Mirrors data across clusters | Mirrors data across geographically distributed clusters |
| Provides high-availability | Ensures backing up of data for disaster recovery |
| Synchronous replication (each and every file operation is sent across all the bricks) | Asynchronous replication (checks for the changes in files periodically and syncs them on detecting differences) |

Exploring Geo-replication Deployment Scenarios

+

Geo-replication provides an incremental replication service over Local +Area Networks (LANs), Wide Area Network (WANs), and across the +Internet.

+

This section illustrates the most common deployment scenarios for +Geo-replication, including the following:

+

Geo-replication over Local Area Network(LAN)

+

geo-rep_lan

+

Geo-replication over Wide Area Network(WAN)

+

geo-rep_wan

+

Geo-replication over Internet

+

geo-rep03_internet

+

Mirror data in a cascading fashion across multiple sites(Multi-site cascading Geo-replication)

+

geo-rep04_cascading

+

Secondary User setup

+

Setup an unprivileged user in Secondary nodes to secure the SSH +connectivity to those nodes. The unprivileged Secondary user uses the +mountbroker service of glusterd to set up an auxiliary gluster mount +for the user in a special environment, which ensures that the user is +only allowed to access with special parameters that provide +administrative level access to the particular Volume.

+

In all the Secondary nodes, create a new group as "geogroup".

+
sudo groupadd geogroup
+
+

In all the Secondary nodes, create an unprivileged account. For example, +"geoaccount". Add geoaccount as a member of "geogroup" group.

+
useradd -G geogroup geoaccount
+
+

In any one Secondary node, run the following command to setup the +mountbroker root directory and group.

+
gluster-mountbroker setup <MOUNT ROOT> <GROUP>
+
+

For example,

+
gluster-mountbroker setup /var/mountbroker-root geogroup
+
+

In any one of Secondary node, Run the following commands to add Volume and +user to mountbroker service.

+
gluster-mountbroker add <VOLUME> <USER>
+
+

For example,

+
gluster-mountbroker add gvol-secondary geoaccount
+
+

(Note: To remove a user, use gluster-mountbroker remove command)

+

Check the status of setup using,

+
gluster-mountbroker status
+
+

Restart glusterd service on all Secondary nodes.

+

Setting Up the Environment for Geo-replication

+

Time Synchronization

+

On the bricks of a geo-replication Primary volume, all servers' clocks must be uniform. It is recommended to set up NTP (Network Time Protocol) or a similar service to keep the bricks in sync with respect to time and avoid out-of-sync effects.

+

For example: In a Replicated volume where brick1 of the Primary is at +12.20 hrs, and brick 2 of the Primary is at 12.10 hrs with 10 minutes +time lag, all the changes in brick2 between this period may go +unnoticed during synchronization of files with Secondary.

+
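One common way to do this on modern RPM-based distributions is chrony; the commands below are a hedged sketch, and package or service names may differ on your platform:

```
# On every brick node of the Primary (and ideally the Secondary) volume
yum install chrony
systemctl enable --now chronyd

# Confirm the clock is being disciplined
chronyc tracking
```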

Password-less SSH

+

Password-less login has to be set up between the host machine (where +geo-replication Create command will be issued) and one of the Secondary +node for the unprivileged account created above.

+

Note: This is required to run Create command. This can be disabled +once the session is established.(Required again while running create +force)

+

On one of the Primary node where geo-replication Create command will be +issued, run the following command to generate the SSH key(Press Enter +twice to avoid passphrase).

+
ssh-keygen
+
+

Run the following command on the same node to one Secondary node which is +identified as the main Secondary node.

+
ssh-copy-id geoaccount@snode1.example.com
+
+

Creating secret pem pub file

+

Execute the below command from the node where you setup the +password-less ssh to Secondary. This will generate Geo-rep session +specific ssh-keys in all Primary peer nodes and collect public keys +from all peer nodes to the command initiated node.

+
gluster-georep-sshkey generate
+
+

This command adds extra prefix inside common_secret.pem.pub file to +each pub keys to prevent running extra commands using this key, to +disable that prefix,

+
gluster-georep-sshkey generate --no-prefix
+
+

Creating the session

+

Create a geo-rep session between Primary and Secondary volume using the +following command. The node in which this command is executed and the +<Secondary_host> specified in the command should have password less ssh +setup between them. The push-pem option actually uses the secret pem +pub file created earlier and establishes geo-rep specific password +less ssh between each node in Primary to each node of Secondary.

+
gluster volume geo-replication <primary_volume> \
+    <secondary_user>@<secondary_host>::<secondary_volume> \
+    create [ssh-port <port>] push-pem|no-verify [force]
+
+

For example,

+
gluster volume geo-replication gvol-primary \
+  geoaccount@snode1.example.com::gvol-secondary \
+  create push-pem
+
+

If custom SSH port (example: 50022) is configured in Secondary nodes then

+
gluster volume geo-replication gvol-primary  \
+  geoaccount@snode1.example.com::gvol-secondary \
+  config ssh_port 50022
+
+gluster volume geo-replication gvol-primary  \
+  geoaccount@snode1.example.com::gvol-secondary \
+  create ssh-port 50022 push-pem
+
+

If the total available size in Secondary volume is less than the total +size of Primary, the command will throw error message. In such cases +'force' option can be used.

+

In use cases where the rsa-keys of nodes in Primary volume is +distributed to Secondary nodes through an external agent and following +Secondary side verifications are taken care of by the external agent, then

+
    +
  • if ssh port 22 or custom port is open in Secondary
  • +
  • has proper passwordless ssh login setup
  • +
  • Secondary volume is created and is empty
  • +
  • if Secondary has enough memory
  • +
+

Then use following command to create Geo-rep session with no-verify +option.

+
gluster volume geo-replication <primary_volume> \
+    <secondary_user>@<secondary_host>::<secondary_volume> create no-verify [force]
+
+

For example,

+
gluster volume geo-replication gvol-primary  \
+  geoaccount@snode1.example.com::gvol-secondary \
+  create no-verify
+
+

In this case, the Primary node rsa-key distribution to the Secondary nodes does not happen and the above-mentioned Secondary verification is not performed; these two things have to be taken care of externally.

+

Post Creation steps

+

Run the following command as root in any one of Secondary node.

+
/usr/libexec/glusterfs/set_geo_rep_pem_keys.sh <secondary_user> \
+    <primary_volume> <secondary_volume>
+
+

For example,

+
/usr/libexec/glusterfs/set_geo_rep_pem_keys.sh geoaccount \
+  gvol-primary gvol-secondary
+
+

Configuration

+

Configuration can be changed anytime after creating the session. After +successful configuration change, Geo-rep session will be automatically +restarted.

+

To view all configured options of a session,

+
gluster volume geo-replication <primary_volume> \
+    <secondary_user>@<secondary_host>::<secondary_volume> config [option]
+
+

For Example,

+
gluster volume geo-replication gvol-primary  \
+  geoaccount@snode1.example.com::gvol-secondary \
+  config
+
+gluster volume geo-replication gvol-primary  \
+  geoaccount@snode1.example.com::gvol-secondary \
+  config sync-jobs
+
+

To configure Gluster Geo-replication, use the following command at the +Gluster command line

+
gluster volume geo-replication <primary_volume> \
+   <secondary_user>@<secondary_host>::<secondary_volume> config [option]
+
+

For example:

+
gluster volume geo-replication gvol-primary  \
+  geoaccount@snode1.example.com::gvol-secondary \
+  config sync-jobs 3
+
+
+

Note: If Geo-rep is in between sync, restart due to configuration +change may cause resyncing a few entries which are already synced.

+
+

Configurable Options

+

Meta Volume

+

In the case of Replica bricks, one brick worker will be Active and participate in syncing while the others wait as Passive. By default Geo-rep uses node-uuid: if the node-uuid of a worker is present in the node-id list of the first up subvolume, then that worker becomes Active. With this method, multiple workers of the same replica become Active if multiple bricks from the same machine are used.

+

To prevent this, a Meta Volume (an extra Gluster Volume) can be used in Geo-rep. With this method, each worker tries to acquire a lock on a file inside the meta volume; the lock file name pattern is different for each sub volume. If a worker acquires the lock, it becomes Active, else it remains Passive.

+
gluster volume geo-replication <primary_volume> \
+    <secondary_user>@<secondary_host>::<secondary_volume> config
+    use-meta-volume true
+
+
+

Note: Meta Volume is shared replica 3 Gluster Volume. The name +of the meta-volume should be gluster_shared_storage and should be +mounted at /var/run/gluster/shared_storage/.

+
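On recent Gluster releases, the shared storage volume described in the note can usually be created and mounted on all nodes with the built-in option below; treat this as a convenience shortcut and confirm it against your version's documentation:

```
gluster volume set all cluster.enable-shared-storage enable
```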
+

The following table provides an overview of the configurable options +for a geo-replication setting:

| Option | Description |
| --- | --- |
| log-level LOGFILELEVEL | The log level for geo-replication. |
| gluster-log-level LOGFILELEVEL | The log level for glusterfs processes. |
| changelog-log-level LOGFILELEVEL | The log level for Changelog processes. |
| ssh-command COMMAND | The SSH command to connect to the remote machine (the default is ssh). If ssh is installed in a custom location, that path can be configured, for example /usr/local/sbin/ssh. |
| rsync-command COMMAND | The rsync command to use for synchronizing the files (the default is rsync). |
| use-tarssh true | The use-tarssh command allows tar over Secure Shell protocol. Use this option to handle workloads of files that have not undergone edits. |
| timeout SECONDS | The timeout period in seconds. |
| sync-jobs N | The number of simultaneous files/directories that can be synchronized. |
| ignore-deletes | If this option is set to 1, a file deleted on the primary will not trigger a delete operation on the secondary. As a result, the secondary will remain as a superset of the primary and can be used to recover the primary in the event of a crash and/or accidental delete. |

Starting Geo-replication

+

Use the following command to start geo-replication session,

+
gluster volume geo-replication <primary_volume>  \
+    <secondary_user>@<secondary_host>::<secondary_volume> \
+    start [force]
+
+

For example,

+
gluster volume geo-replication gvol-primary  \
+  geoaccount@snode1.example.com::gvol-secondary \
+  start
+
+
+

Note

+

You may need to configure the session before starting Gluster +Geo-replication.

+
+

Stopping Geo-replication

+

Use the following command to stop a geo-replication session,

+
gluster volume geo-replication <primary_volume>  \
+    <secondary_user>@<secondary_host>::<secondary_volume> \
+    stop [force]
+
+

For example,

+
gluster volume geo-replication gvol-primary  \
+  geoaccount@snode1.example.com::gvol-secondary \
+  stop
+
+

Status

+

To check the status of all Geo-replication sessions in the Cluster

+
gluster volume geo-replication status
+
+

To check the status of one session,

+
gluster volume geo-replication <primary_volume> \
+    <secondary_user>@<secondary_host>::<secondary_volume> status [detail]
+
+

Example,

+
gluster volume geo-replication gvol-primary \
+  geoaccount@snode1::gvol-secondary status
+
+gluster volume geo-replication gvol-primary \
+  geoaccount@snode1::gvol-secondary status detail
+
+

Example Status Output

+
PRIMARY NODE    PRIMARY VOL          PRIMARY BRICK    SECONDARY USER    SECONDARY         SECONDARY NODE    STATUS    CRAWL STATUS       LAST_SYNCED
+---------------------------------------------------------------------------------------------------------------------------------------------------------
+mnode1         gvol-primary           /bricks/b1      root          snode1::gvol-secondary  snode1        Active    Changelog Crawl    2016-10-12 23:07:13
+mnode2         gvol-primary           /bricks/b2      root          snode1::gvol-secondary  snode2        Active    Changelog Crawl    2016-10-12 23:07:13
+
+

Example Status detail Output

+
PRIMARY NODE    PRIMARY VOL    PRIMARY BRICK    SECONDARY USER    SECONDARY        SECONDARY NODE    STATUS    CRAWL STATUS       LAST_SYNCED            ENTRY    DATA    META    FAILURES    CHECKPOINT TIME    CHECKPOINT COMPLETED    CHECKPOINT COMPLETION TIME
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+mnode1         gvol-primary           /bricks/b1      root          snode1::gvol-secondary  snode1        Active    Changelog Crawl    2016-10-12 23:07:13    0        0       0       0           N/A                N/A                     N/A
+mnode2         gvol-primary           /bricks/b2      root          snode1::gvol-secondary  snode2        Active    Changelog Crawl    2016-10-12 23:07:13    0        0       0       0           N/A                N/A                     N/A
+
+

The STATUS of the session could be one of the following,

+
    +
  • +

    Initializing: This is the initial phase of the Geo-replication + session; it remains in this state for a minute in order to make + sure no abnormalities are present.

    +
  • +
  • +

    Created: The geo-replication session is created, but not + started.

    +
  • +
  • +

    Active: The gsync daemon in this node is active and syncing the + data. (One worker among the replica pairs will be in Active state)

    +
  • +
  • +

    Passive: A replica pair of the active node. The data + synchronization is handled by active node. Hence, this node does + not sync any data. If Active node goes down, Passive worker will + become Active

    +
  • +
  • +

    Faulty: The geo-replication session has experienced a problem, + and the issue needs to be investigated further. Check log files + for more details about the Faulty status. Log file path can be + found using

    +
    gluster volume geo-replication <primary_volume> \
    +    <secondary_user>@<secondary_host>::<secondary_volume> config log-file
    +
    +
  • +
  • +

    Stopped: The geo-replication session has stopped, but has not + been deleted.

    +
  • +
+

The CRAWL STATUS can be one of the following:

+
    +
  • +

    Hybrid Crawl: The gsyncd daemon is crawling the glusterFS file + system and generating pseudo changelog to sync data. This crawl is + used during initial sync and if Changelogs are not available.

    +
  • +
  • +

    History Crawl: gsyncd daemon syncs data by consuming Historical + Changelogs. On every worker restart, Geo-rep uses this Crawl to + process backlog Changelogs.

    +
  • +
  • +

    Changelog Crawl: The changelog translator has produced the + changelog and that is being consumed by gsyncd daemon to sync + data.

    +
  • +
+

The ENTRY denotes: +The number of pending entry operations (create, mkdir, mknod, symlink, link, rename, unlink, rmdir) per session.

+

The DATA denotes: +The number of pending Data operations (write, writev, truncate, ftruncate) per session.

+

The META denotes: +The number of pending Meta operations (setattr, fsetattr, setxattr, fsetxattr, removexattr, fremovexattr) per session.

+

The FAILURE denotes: +The number of failures per session. On encountering failures, one can proceed to look at the log files.

+

Deleting the session

+

Established Geo-replication session can be deleted using the following +command,

+
gluster volume geo-replication <primary_volume> \
+    <secondary_user>@<secondary_host>::<secondary_volume> delete [force]
+
+

For example,

+
gluster volume geo-replication gvol-primary \
+  geoaccount@snode1.example.com::gvol-secondary delete
+
+
+

Note: If the same session is created again then syncing will resume +from where it was stopped before deleting the session. If the +session to be deleted permanently then use reset-sync-time option +with delete command. For example, gluster volume geo-replication gvol-primary geoaccount@snode1::gvol-secondary delete reset-sync-time

+
+

Checkpoint

+

Using Checkpoint feature we can find the status of sync with respect +to the Checkpoint time. Checkpoint completion status shows "Yes" once +Geo-rep syncs all the data from that brick which are created or +modified before the Checkpoint Time.

+

Set the Checkpoint using,

+
gluster volume geo-replication <primary_volume> \
+    <secondary_user>@<secondary_host>::<secondary_volume> config checkpoint now
+
+

Example,

+
gluster volume geo-replication gvol-primary \
+  geoaccount@snode1.example.com::gvol-secondary \
+  config checkpoint now
+
+

Touch the Primary mount point to make sure Checkpoint completes even +though no I/O happening in the Volume

+
mount -t glusterfs <primaryhost>:<primaryvol> /mnt
+touch /mnt
+
+

Checkpoint status can be checked using Geo-rep status +command. Following columns in status output gives more information +about Checkpoint

+
    +
  • CHECKPOINT TIME: Checkpoint Set Time
  • +
  • CHECKPOINT COMPLETED: Yes/No/NA, Status of Checkpoint
  • +
  • CHECKPOINT COMPLETION TIME: Checkpoint Completion Time if + completed, else N/A
  • +
+

Log Files

+

Primary Log files are located in /var/log/glusterfs/geo-replication +directory in each Primary nodes. Secondary log files are located in +/var/log/glusterfs/geo-replication-secondary directory in Secondary nodes.

+

Gluster Snapshots and Geo-replicated Volumes

+

Gluster snapshot of Primary and Secondary should not go out of order on +restore. So while taking snapshot take snapshot of both Primary and +Secondary Volumes.

+
    +
  • +

    Pause the Geo-replication session using,

    +
    gluster volume geo-replication <primary_volume> \
    +    <secondary_user>@<secondary_host>::<secondary_volume> pause
    +
    +
  • +
  • +

    Take Gluster Snapshot of Secondary Volume and Primary Volume(Use same + name for snapshots)

    +
    gluster snapshot create <snapname> <volname>
    +
    +
  • +
+

Example,

+
    gluster snapshot create snap1 gvol-secondary
+    gluster snapshot create snap1 gvol-primary
+
+
    +
  • Resume Geo-replication session using,
    gluster volume geo-replication <primary_volume> \
    +    <secondary_user>@<secondary_host>::<secondary_volume> resume
    +
    +
  • +
+

If we want to continue Geo-rep session after snapshot restore, we need +to restore both Primary and Secondary Volume and resume the Geo-replication +session using force option

+
gluster snapshot restore <snapname>
+gluster volume geo-replication <primary_volume> \
+    <secondary_user>@<secondary_host>::<secondary_volume> resume force
+
+

Example,

+
gluster snapshot restore snap1 # Secondary Snap
+gluster snapshot restore snap1 # Primary Snap
+gluster volume geo-replication gvol-primary geoaccount@snode1::gvol-secondary \
+  resume force
+

Gluster On ZFS

+ +

Gluster On ZFS

+

This is a step-by-step set of instructions to install Gluster on top of ZFS as the backing file store. There are some commands which were specific to my installation, specifically, the ZFS tuning section. Moniti estis.

+

Preparation

+
    +
  • Install CentOS 6.3
  • +
  • Assumption is that your hostname is gfs01
  • +
  • Run all commands as the root user
  • +
  • yum update
  • +
  • Disable IP Tables
  • +
+
chkconfig iptables off
+service iptables stop
+
+
    +
  • Disable SELinux
  • +
+
1. edit `/etc/selinux/config`
+2. set `SELINUX=disabled`
+3. reboot
+
+

Install ZFS on Linux

+

For RHEL6 or 7 and derivatives, you can install the ZFSoL repo (and EPEL) and use that to install ZFS

+
    +
  • RHEL 6:
  • +
+
yum localinstall --nogpgcheck https://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
+yum localinstall --nogpgcheck http://archive.zfsonlinux.org/epel/zfs-release.el6.noarch.rpm
+yum install kernel-devel zfs
+
+
    +
  • RHEL 7:
  • +
+
yum localinstall --nogpgcheck https://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-2.noarch.rpm
+yum localinstall --nogpgcheck http://archive.zfsonlinux.org/epel/zfs-release.el7.noarch.rpm
+yum install kernel-devel zfs
+
+

and skip to Finish ZFS Configuration below.

+

Or you can roll your own if you want specific patches:

+
yum groupinstall "Development Tools"
+
+ +

Install DKMS

+

We want to automatically rebuild the kernel modules when we upgrade the kernel, so you definitely want DKMS with ZFS on Linux.

+ +
rpm -Uvh dkms*.rpm
+
+

Build & Install SPL

+
    +
  • Enter SPL source directory
  • +
  • The following commands create two source & three binary RPMs. Remove the static module RPM (we are using DKMS) and install the rest:
  • +
+
./configure
+make rpm
+rm spl-modules-0.6.0*.x86_64.rpm
+rpm -Uvh spl*.x86_64.rpm spl*.noarch.rpm
+
+

Build & Install ZFS

+

Notice: +If you plan to use the xattr=sa filesystem option, make sure you have the ZFS fix for https://github.com/zfsonlinux/zfs/issues/1648 so your symlinks don't get corrupted. +(applies to ZFSoL before 0.6.3, xattr=sa is safe to use on 0.6.3 and later)

+
    +
  • Enter ZFS source directory
  • +
  • The following commands create two source & five binary RPMs. Remove the static module RPM and install the rest. Note we have a few preliminary packages to install before we can compile.
  • +
+
yum install zlib-devel libuuid-devel libblkid-devel libselinux-devel parted lsscsi
+./configure
+make rpm
+rm zfs-modules-0.6.0*.x86_64.rpm
+rpm -Uvh zfs*.x86_64.rpm zfs*.noarch.rpm
+
+

Finish ZFS Configuration

+
    +
  • Reboot to allow all changes to take effect, if desired
  • +
  • Create ZFS storage pool, in below examples it will be named sp1. This is a simple example of 4 HDDs in RAID10. NOTE: Check the latest ZFS on Linux FAQ about configuring the /etc/zfs/zdev.conf file. You want to create mirrored devices across controllers to maximize performance. Make sure to run udevadm trigger after creating zdev.conf.
  • +
+
zpool create -f sp1 mirror A0 B0 mirror A1 B1
+zpool status sp1
+df -h
+
+
    +
  • You should see the /sp1 mount point
  • +
  • Enable ZFS compression to save disk space:
  • +
+

zfs set compression=on sp1

+
    +
  • you can also use lz4 compression on later versions of ZFS as it can be faster, especially for incompressible workloads. It is safe to change this on the fly, as ZFS will compress new data with the current setting:
  • +
+

zfs set compression=lz4 sp1

+
    +
  • Set ZFS tunables. This is specific to my environment.
  • +
  • Set ARC cache min to 33% and max to 75% of installed RAM. Since this is a dedicated storage node, I can get away with this. In my case my servers have 24G of RAM. More RAM is better with ZFS.
  • +
  • We use SATA drives which do not accept command tagged queuing, therefore set the min and max pending requests to 1
  • +
  • Disable read prefetch because it is almost completely useless and does nothing in our environment but work the drives unnecessarily. I see < 10% prefetch cache hits, so it's really not required and actually hurts performance.
  • +
  • Set transaction group timeout to 5 seconds to prevent the volume from appearing to freeze due to a large batch of writes. 5 seconds is the default, but safe to force this.
  • +
  • Ignore client flush/sync commands; let ZFS handle this with the transaction group timeout flush. NOTE: Requires a UPS backup solution unless you don't mind losing that 5 seconds worth of data.
  • +
+
echo "options zfs zfs_arc_min=8G zfs_arc_max=18G zfs_vdev_min_pending=1 zfs_vdev_max_pending=1 zfs_prefetch_disable=1 zfs_txg_timeout=5" > /etc/modprobe.d/zfs.conf
+reboot
+
+
    +
  • Setting the acltype property to posixacl indicates Posix ACLs should be used.
  • +
+

zfs set acltype=posixacl sp1

+

Install GlusterFS

+
wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo
+yum install glusterfs{-fuse,-server}
+service glusterd start
+service glusterd status
+chkconfig glusterd on
+
+
    +
  • Continue with your GFS peer probe, volume creation, etc.
  • +
  • To mount GFS volumes automatically after reboot, add these lines to /etc/rc.local (assuming your gluster volume is called export and your desired mount point is /export:
  • +
+
# Mount GFS Volumes
+mount -t glusterfs gfs01:/export /export
+
+

Miscellaneous Notes & TODO

+

Daily e-mail status reports

+

Python script source; put your desired e-mail address in the toAddr variable. Add a crontab entry to run this daily.

+
#!/usr/bin/python
+'''
+Send e-mail to given user with zfs status
+'''
+import datetime
+import socket
+import smtplib
+import subprocess
+
+
+def doShellCmd(cmd):
+    '''execute system shell command, return output as string'''
+    subproc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+    cmdOutput = subproc.communicate()[0]
+    return cmdOutput
+
+hostname = socket.gethostname()
+statusLine = "Status of " + hostname + " at " + str(datetime.datetime.now())
+zpoolList = doShellCmd('zpool list')
+zpoolStatus = doShellCmd('zpool status')
+zfsList = doShellCmd('zfs list')
+report = (statusLine + "\n" +
+    "-----------------------------------------------------------\n" +
+    zfsList +
+    "-----------------------------------------------------------\n" +
+    zpoolList +
+    "-----------------------------------------------------------\n" +
+    zpoolStatus)
+
+fromAddr = "From: root@" + hostname + "\r\n"
+toAddr = "To: user@your.com\r\n"
+subject = "Subject: ZFS Status from " + hostname + "\r\n"
+msg = (subject + report)
+server = smtplib.SMTP('localhost')
+server.set_debuglevel(1)
+server.sendmail(fromAddr, toAddr, msg)
+server.quit()
+
+
+
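To actually schedule the report, a crontab entry along these lines would work; the script path is hypothetical and should point wherever you saved the script above:

```
# /etc/cron.d/zfs-status: mail the ZFS status at 06:30 every day
30 6 * * * root /usr/local/bin/zfs-status-mail.py
```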

Restoring files from ZFS Snapshots

+

Show which node a file is on (for restoring files from ZFS snapshots):

+
 getfattr -n trusted.glusterfs.pathinfo <file>
+
+

Recurring ZFS Snapshots

+

Since the community site will not let me actually post the script due to some random bug with Akismet spam blocking, I'll just post links instead.
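In the absence of those scripts, here is a minimal sketch of a recurring snapshot job; it assumes the pool name sp1 used on this page and a seven-day retention, and it should be reviewed and adapted before being dropped into cron:

```
#!/bin/bash
# Take a dated, recursive snapshot of the pool and prune daily snapshots older than 7 days.
POOL=sp1
TAG="daily-$(date +%Y%m%d)"

zfs snapshot -r "${POOL}@${TAG}"

cutoff=$(date -d '7 days ago' +%Y%m%d)
zfs list -H -t snapshot -o name | grep "^${POOL}@daily-" | while read -r snap; do
    day=${snap##*daily-}
    if [ "$day" -lt "$cutoff" ]; then
        zfs destroy -r "$snap"
    fi
done
```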


Accessing GlusterFS using Cinder Hosts

+

Note: GlusterFS driver was removed from Openstack since Ocata. This guide applies only to older Openstack releases.

+

1. Introduction

+

GlusterFS and Cinder integration provides a system for data storage that enables users to access the same data, both as an object and as a file, thus simplifying management and controlling storage costs.

+

GlusterFS - GlusterFS is an open source, distributed file system capable of scaling to several petabytes and handling thousands of clients. GlusterFS clusters together storage building blocks over Infiniband RDMA or TCP/IP interconnect, aggregating disk and memory resources and managing data in a single global namespace. GlusterFS is based on a stackable user space design and can deliver exceptional performance for diverse workloads.

+

Cinder - Cinder is the OpenStack service which is responsible for handling persistent storage for virtual machines. This is persistent block storage for the instances running in Nova. Snapshots can be taken for backing up and data, either for restoring data, or to be used to create new block storage volumes.

+

With Enterprise Linux 6, configuring OpenStack Grizzly to use GlusterFS for its Cinder (block) storage is fairly simple.

+

These instructions have been tested with both GlusterFS 3.3 and GlusterFS 3.4. Other releases may also work, but have not been tested.

+

2. Prerequisites

+

GlusterFS

+

For information on prerequisites and instructions for installing GlusterFS, see http://www.gluster.org/community/documentation/index.php.

+

Cinder

+

For information on prerequisites and instructions for installing Cinder, see http://docs.openstack.org/.

+

Before beginning, you must ensure there are no existing volumes in Cinder. Use "cinder delete" to remove any, and "cinder list" to verify that they are deleted. If you do not delete the existing cinder volumes, it will cause errors later in the process, breaking your Cinder installation.
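For example, run something along these lines as root before continuing; the volume IDs printed by cinder list are placeholders here:

```
cinder list
cinder delete <volume-id>   # repeat for every volume listed
cinder list                 # verify that no volumes remain
```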

+

NOTE - Unlike other software, the "openstack-config" and "cinder" commands generally require you to run them as a root user. Without prior configuration, running them through sudo generally does not work. (This can be changed, but is beyond the scope of this HOW-TO.)

+

3 Installing GlusterFS Client on Cinder hosts

+

On each Cinder host, install the GlusterFS client packages:

+
sudo yum -y install glusterfs-fuse
+
+

4. Configuring Cinder to Add GlusterFS

+

On each Cinder host, run the following commands to add GlusterFS to the Cinder configuration:

+
openstack-config --set /etc/cinder/cinder.conf DEFAULT volume_driver cinder.volume.drivers.glusterfs.GlusterfsDriver
+openstack-config --set /etc/cinder/cinder.conf DEFAULT glusterfs_shares_config /etc/cinder/shares.conf
+openstack-config --set /etc/cinder/cinder.conf DEFAULT glusterfs_mount_point_base /var/lib/cinder/volumes
+
+

5. Creating GlusterFS Volume List

+

On each of the Cinder nodes, create a simple text file /etc/cinder/shares.conf.

+

This file is a simple list of the GlusterFS volumes to be used, one per line, using the following format:

+
GLUSTERHOST:VOLUME
+GLUSTERHOST:NEXTVOLUME
+GLUSTERHOST2:SOMEOTHERVOLUME
+
+

For example:

+
myglusterbox.example.org:myglustervol
+
+

6. Updating Firewall for GlusterFS

+

You must update the firewall rules on each Cinder node to communicate with the GlusterFS nodes.

+

The ports to open are explained in Step 3:

+

https://docs.gluster.org/en/latest/Install-Guide/Install/

+

If you are using iptables as your firewall, these lines can be added under :OUTPUT ACCEPT in the "*filter" section. You should probably adjust them to suit your environment (eg. only accept connections from your GlusterFS servers).

+
-A INPUT -m state --state NEW -m tcp -p tcp --dport 111 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 24007 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 24008 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 24009 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 24010 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 24011 -j ACCEPT
+-A INPUT -m state --state NEW -m tcp -p tcp --dport 38465:38469 -j ACCEPT
+
+

Restart the firewall service:

+
sudo service iptables restart
+
+

7. Restarting Cinder Services

+

Configuration is complete and now you must restart the Cinder services to make it active.

+
for i in api scheduler volume; do sudo service openstack-cinder-${i} start; done
+
+

Check the Cinder volume log to make sure that there are no errors:

+
sudo tail -50 /var/log/cinder/volume.log
+
+

8. Verify GlusterFS Integration with Cinder

+

To verify if the installation and configuration is successful, create a Cinder volume then check using GlusterFS.

+

Create a Cinder volume:

+
cinder create --display_name myvol 10
+
+

Volume creation takes a few seconds. Once created, run the following command:

+
cinder list
+
+

The volume should be in "available" status. Now, look for a new file in the GlusterFS volume directory:

+
sudo ls -lah /var/lib/cinder/volumes/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/
+
+

(the XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX will be a number specific to your installation)

+

A newly created file should be inside that directory which is the new volume you just created. A new file will appear each time you create a volume.

+

For example:

+
$ sudo ls -lah /var/lib/cinder/volumes/29e55f0f3d56494ef1b1073ab927d425/
+ 
+ total 4.0K
+ drwxr-xr-x. 3 root   root     73 Apr  4 15:46 .
+ drwxr-xr-x. 3 cinder cinder 4.0K Apr  3 09:31 ..
+ -rw-rw-rw-. 1 root   root    10G Apr  4 15:46 volume-a4b97d2e-0f8e-45b2-9b94-b8fa36bd51b9
+

Coreutils for GlusterFS volumes

+

The GlusterFS Coreutils is a suite of utilities that aims to mimic the standard Linux coreutils, with the exception that it utilizes the gluster C API in order to do work. It offers an interface similar to that of the ftp program. +Operations include things like getting files from the server to the local machine, putting files from the local machine to the server, retrieving directory information from the server and so on.

+

Installation

+

Install GlusterFS

+

For information on prerequisites, instructions and configuration of GlusterFS, see Installation Guides from http://docs.gluster.org/en/latest/.

+

Install glusterfs-coreutils

+

For now glusterfs-coreutils will be packaged only as rpm. Other package formats will be supported very soon.

+
For fedora
+

Use dnf/yum to install glusterfs-coreutils:

+
dnf install glusterfs-coreutils
+
+

OR

+
yum install glusterfs-coreutils
+
+

Usage

+

glusterfs-coreutils provides a set of basic utilities such as cat, cp, flock, ls, mkdir, rm, stat and tail that are implemented specifically using the GlusterFS API commonly known as libgfapi. These utilities can be used either inside a gluster remote shell or as standalone commands with 'gf' prepended to their respective base names. For example, the glusterfs cat utility is named gfcat, and so on, with an exception for the flock core utility, for which a standalone gfflock command is not provided (see the Notes section on why flock is designed that way).

+

Using coreutils within a remote gluster-shell

+
Invoke a new shell
+

In order to enter into a gluster client-shell, type gfcli and press enter. You will now be presented with a similar prompt as shown below:

+
# gfcli
+gfcli>
+
+

See the man page for gfcli for more options.

+
Connect to a gluster volume
+

Now we need to connect as a client to a GlusterFS volume that has already been started. Use the connect command as follows:

+
gfcli> connect glfs://<SERVER-IP or HOSTNAME>/<VOLNAME>
+
+

For example if you have a volume named vol on a server with hostname localhost the above command will take the following form:

+
gfcli> connect glfs://localhost/vol
+
+

Make sure that you are successfully attached to a remote gluster volume by verifying the new prompt which should look like:

+
gfcli (<SERVER-IP or HOSTNAME>/<VOLNAME>)
+
+
Try out your favorite utilities
+

Please go through the man pages for different utilities and available options for each command. For example, man gfcp will display details on the usage of cp command outside or within a gluster-shell. Run different commands as follows:

+
gfcli (localhost/vol) ls .
+gfcli (localhost/vol) stat .trashcan
+
+
Terminate the client connection from the volume
+

Use disconnect command to close the connection:

+
gfcli (localhost/vol) disconnect
+gfcli>
+
+
Exit from shell
+

Run quit from shell:

+
gfcli> quit
+
+

Using standalone glusterfs coreutil commands

+

As mentioned above, glusterfs-coreutils also provides standalone commands that perform the basic GNU coreutils functionality. All of these commands are prefixed with 'gf'. Instead of invoking a gluster client-shell, you can use them directly to establish a connection and perform the operation in one shot. For example, see the following sample usage of the gfstat command:

+
gfstat glfs://localhost/vol/foo
+
+

There is an exception for the flock core utility, which is not available as a standalone command for the reason described under the 'Notes' section.

+

For more information on each command and corresponding options see associated man pages.
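A couple of further illustrative invocations follow; the volume name vol and the file paths used here are placeholders, and the exact argument forms accepted by each tool should be checked in its man page:

# list the contents of the volume root
gfls glfs://localhost/vol

# print a file that lives on the volume
gfcat glfs://localhost/vol/foo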

+

Notes

+
    +
  • Within a particular gluster client-shell session, the command history is preserved, i.e., you can use the up/down arrow keys to move through previously executed commands, or use reverse history search with Ctrl+R.
  • +
  • flock is not available as a standalone 'gfflock' because locks are always associated with file descriptors. Unlike all other commands, flock cannot simply clean up the file descriptor after acquiring the lock; for flock we need to maintain an active connection as a GlusterFS client.
  • +
\ No newline at end of file
diff --git a/Administrator-Guide/GlusterFS-Filter/index.html b/Administrator-Guide/GlusterFS-Filter/index.html
new file mode 100644
index 00000000..bef154c9
--- /dev/null
+++ b/Administrator-Guide/GlusterFS-Filter/index.html
@@ -0,0 +1,4456 @@

GlusterFS Filter - Gluster Docs

Modifying .vol files with a filter

+

If you need to make manual changes to a .vol file, it is recommended to make them through the command-line interface ('gluster foo'). Making changes directly to .vol files is discouraged, because it cannot be predicted when a .vol file will be reset on disk, for example by a 'gluster set foo' command. The command-line interface was never designed to read the .vol files, but rather to keep state and rebuild them (from /var/lib/glusterd/vols/$vol/info). There is, however, another way to do this.

+

You can create a shell script in the directory /usr/lib*/glusterfs/$VERSION/filter. All scripts located there will be executed every time the .vol files are written back to disk. The first and only argument passed to each script is the name of the .vol file.

+

So you could create a script there that looks like this:

+
#!/bin/sh
+sed -i 'some-sed-magic' "$1"
+
+

This script will then run the sed command on each .vol file (passed as $1) whenever the files are regenerated.

+

Importantly, the script needs to be set as executable (e.g. via chmod), else it won't be run.
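For example, if the script were saved as nameresolve.sh (a hypothetical name) in the filter directory, it could be marked executable like this (adjust the library path and version directory to your installation):

chmod +x /usr/lib*/glusterfs/$VERSION/filter/nameresolve.sh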

\ No newline at end of file
diff --git a/Administrator-Guide/GlusterFS-Introduction/index.html b/Administrator-Guide/GlusterFS-Introduction/index.html
new file mode 100644
index 00000000..69d01ef1
--- /dev/null
+++ b/Administrator-Guide/GlusterFS-Introduction/index.html
@@ -0,0 +1,4515 @@

Introduction - Gluster Docs

What is Gluster ?

+

Gluster is a scalable, distributed file system that aggregates disk storage resources from multiple servers into a single global namespace.

+

Advantages

+
    +
  • Scales to several petabytes
  • +
  • Handles thousands of clients
  • +
  • POSIX compatible
  • +
  • Uses commodity hardware
  • +
  • Can use any on-disk filesystem that supports extended attributes
  • +
  • Accessible using industry standard protocols like NFS and SMB
  • +
  • Provides replication, quotas, geo-replication, snapshots and bitrot detection
  • +
  • Allows optimization for different workloads
  • +
  • Open Source
  • +
+

(Figure: GlusterFS architecture)

+

Enterprises can scale capacity, performance, and availability on demand, with no vendor lock-in, across on-premise, public cloud, and hybrid environments. +Gluster is used in production at thousands of organisations spanning media, healthcare, government, education, web 2.0, and financial services.

+

Commercial offerings and support

+

Several companies offer support or consulting.

+

Red Hat Gluster Storage +is a commercial storage software product, based on Gluster.

\ No newline at end of file
diff --git a/Administrator-Guide/GlusterFS-Keystone-Quickstart/index.html b/Administrator-Guide/GlusterFS-Keystone-Quickstart/index.html
new file mode 100644
index 00000000..6168ddc6
--- /dev/null
+++ b/Administrator-Guide/GlusterFS-Keystone-Quickstart/index.html
@@ -0,0 +1,4565 @@

GlusterFS Keystone Quickstart - Gluster Docs

GlusterFS Keystone Quickstart

+

This is a document in progress, and may contain some errors or missing information.

+

I am currently in the process of building an AWS Image with this installed, however if you can't wait, and want to install this with a script, here are the commands from both articles, with defaults appropriate for an Amazon CentOS/RHEL 6 AMI, such as ami-a6e15bcf

+

This document assumes you already have GlusterFS with UFO installed, 3.3.1-11 or later, and are using the instructions here:

+

http://www.gluster.org/2012/09/howto-using-ufo-swift-a-quick-and-dirty-setup-guide/

+

These docs are largely derived from:

+

http://fedoraproject.org/wiki/Getting_started_with_OpenStack_on_Fedora_17#Initial_Keystone_setup

+

Add the RDO Openstack Grizzly and Epel repos:

+
sudo yum install -y "http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm"
+
+sudo yum install -y "http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-1.noarch.rpm"
+
+

Install Openstack-Keystone

+
sudo yum install openstack-keystone openstack-utils python-keystoneclient
+
+

Configure keystone

+
$ cat > keystonerc << _EOF
+export ADMIN_TOKEN=$(openssl rand -hex 10)
+export OS_USERNAME=admin
+export OS_PASSWORD=$(openssl rand -hex 10)
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=https://127.0.0.1:5000/v2.0/
+export SERVICE_ENDPOINT=https://127.0.0.1:35357/v2.0/
+export SERVICE_TOKEN=\$ADMIN_TOKEN
+_EOF
+
+$ . ./keystonerc
+$ sudo openstack-db --service keystone --init
+
+

Append the keystone configs to /etc/swift/proxy-server.conf

+
$ sudo -i
+
+# cat >> /etc/swift/proxy-server.conf << _EOM
+[filter:keystone]
+use = egg:swift#keystoneauth
+operator_roles = admin, swiftoperator
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+auth_port = 35357
+auth_host = 127.0.0.1
+auth_protocol = https
+_EOM
+
+# exit
+
+

Finish configuring both swift and keystone using the command-line tool:

+
sudo openstack-config --set /etc/swift/proxy-server.conf filter:authtoken admin_token $ADMIN_TOKEN
+sudo openstack-config --set /etc/swift/proxy-server.conf filter:authtoken auth_token $ADMIN_TOKEN
+sudo openstack-config --set /etc/swift/proxy-server.conf DEFAULT log_name proxy_server
+sudo openstack-config --set /etc/swift/proxy-server.conf filter:authtoken signing_dir /etc/swift
+sudo openstack-config --set /etc/swift/proxy-server.conf pipeline:main pipeline "healthcheck cache authtoken keystone proxy-server"
+
+sudo openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $ADMIN_TOKEN
+sudo openstack-config --set /etc/keystone/keystone.conf ssl enable True
+sudo openstack-config --set /etc/keystone/keystone.conf ssl keyfile /etc/swift/cert.key
+sudo openstack-config --set /etc/keystone/keystone.conf ssl certfile /etc/swift/cert.crt
+sudo openstack-config --set /etc/keystone/keystone.conf signing token_format UUID
+sudo openstack-config --set /etc/keystone/keystone.conf sql connection mysql://keystone:keystone@127.0.0.1/keystone
+
+

Configure keystone to start at boot and start it up.

+
sudo chkconfig openstack-keystone on
+sudo service openstack-keystone start # If you script this, you'll want to wait a few seconds to start using it
+
+

We are using untrusted certs, so tell keystone not to complain. If you replace with trusted certs, or are not using SSL, set this to "".

+
INSECURE="--insecure"
+
+

Create the keystone and swift services in keystone:

+
KS_SERVICEID=$(keystone $INSECURE service-create --name=keystone --type=identity --description="Keystone Identity Service" | grep " id " | cut -d "|" -f 3)
+
+SW_SERVICEID=$(keystone $INSECURE service-create --name=swift --type=object-store --description="Swift Service" | grep " id " | cut -d "|" -f 3)
+
+endpoint="https://127.0.0.1:443"
+
+keystone $INSECURE endpoint-create --service_id $KS_SERVICEID \
+  --publicurl $endpoint'/v2.0' --adminurl https://127.0.0.1:35357/v2.0 \
+  --internalurl https://127.0.0.1:5000/v2.0
+
+keystone $INSECURE endpoint-create --service_id $SW_SERVICEID \
+  --publicurl $endpoint'/v1/AUTH_$(tenant_id)s' \
+  --adminurl $endpoint'/v1/AUTH_$(tenant_id)s' \
+  --internalurl $endpoint'/v1/AUTH_$(tenant_id)s'
+
+

Create the admin tenant:

+
admin_id=$(keystone $INSECURE tenant-create --name admin --description "Internal Admin Tenant" | grep id | awk '{print $4}')
+
+

Create the admin roles:

+
admin_role=$(keystone $INSECURE role-create --name admin | grep id | awk '{print $4}')
+ksadmin_role=$(keystone $INSECURE role-create --name KeystoneServiceAdmin | grep id | awk '{print $4}')
+kadmin_role=$(keystone $INSECURE role-create --name KeystoneAdmin | grep id | awk '{print $4}')
+member_role=$(keystone $INSECURE role-create --name member | grep id | awk '{print $4}')
+
+

Create the admin user:

+
user_id=$(keystone $INSECURE user-create --name admin --tenant-id $admin_id --pass $OS_PASSWORD | grep id | awk '{print $4}')
+
+keystone $INSECURE user-role-add --user-id $user_id --tenant-id $admin_id \
+  --role-id $admin_role
+
+keystone $INSECURE user-role-add --user-id $user_id --tenant-id $admin_id \
+  --role-id $kadmin_role
+
+keystone $INSECURE user-role-add --user-id $user_id --tenant-id $admin_id \
+  --role-id $ksadmin_role
+
+

If you do not have multi-volume support (broken in 3.3.1-11), then the volume names will not correlate to the tenants, and all tenants will map to the same volume, so just use a normal name. (This will be fixed in 3.4, and should be fixed in 3.4 Beta. The bug report for this is here: https://bugzilla.redhat.com/show_bug.cgi?id=924792)

+
volname="admin"
+
+# or if you have the multi-volume patch
+volname=$admin_id
+
+

Create and start the admin volume:

+
sudo gluster volume create $volname $myhostname:$pathtobrick
+sudo gluster volume start $volname
+sudo service openstack-keystone start
+
+

Create the ring for the admin tenant. If you have working multi-volume support, then you can specify multiple volume names in the call:

+
cd /etc/swift
+sudo /usr/bin/gluster-swift-gen-builders $volname
+sudo swift-init main restart
+
+

Create a testadmin user associated with the admin tenant with password testadmin and admin role:

+
user_id=$(keystone $INSECURE user-create --name testadmin --tenant-id $admin_id --pass testadmin | grep id | awk '{print $4}')
+
+keystone $INSECURE user-role-add --user-id $user_id --tenant-id $admin_id \
+  --role-id $admin_role
+
+

Test the user:

+
curl $INSECURE -d '{"auth":{"tenantName": "admin", "passwordCredentials":{"username": "testadmin", "password": "testadmin"}}}' -H "Content-type: application/json" "https://127.0.0.1:5000/v2.0/tokens"
+
+

See here for more examples:

+

http://docs.openstack.org/developer/keystone/api_curl_examples.html

\ No newline at end of file
diff --git a/Administrator-Guide/GlusterFS-iSCSI/index.html b/Administrator-Guide/GlusterFS-iSCSI/index.html
new file mode 100644
index 00000000..7d0741e1
--- /dev/null
+++ b/Administrator-Guide/GlusterFS-iSCSI/index.html
@@ -0,0 +1,4576 @@

GlusterFS iSCSI - Gluster Docs

GlusterFS iSCSI

+

Introduction

+

iSCSI on Gluster can be set up using the Linux target driver. This is a user-space daemon that accepts iSCSI (as well as iSER and FCoE) connections. It interprets iSCSI CDBs and converts them into some other I/O operation, according to user configuration. In our case, we convert the CDBs into file operations that run against a gluster file. The file represents the LUN, and the offset in the file represents the LBA.

+

A plug-in for the Linux target driver has been written to use the libgfapi. It is part of the Linux target driver (bs_glfs.c). Using it, the datapath skips FUSE. This document will be updated to describe how to use it. You can see README.glfs in the Linux target driver's documentation subdirectory.

+

LIO is a replacement for the Linux Target Driver that is included in RHEL7. A user-space plug-in mechanism for it is under development. Once that piece of code exists a similar mechanism can be built for gluster as was done for the Linux target driver.

+

Below is a cookbook to set it up using the Linux target driver on the server. This has been tested on XEN and KVM instances within RHEL 6, RHEL 7, and Fedora 19. In this setup a single path leads to gluster, which represents a performance bottleneck and a single point of failure. For HA and load balancing, it is possible to set up two or more paths to different gluster servers using mpio; if the target name is equivalent over each path, mpio will coalesce both paths into a single device.

+

For more information on iSCSI and the Linux target driver, see [1] and [2].

+

Setup

+

Mount gluster locally on your gluster server. Note you can also run it on the gluster client. There are pros and cons to these configurations, described below.

+
mount -t glusterfs 127.0.0.1:gserver /mnt
+
+

Create a large file representing your block device within the gluster fs. In this case, the lun is 2G. (You could also create a gluster "block device" for this purpose, which would skip the file system).

+
dd if=/dev/zero of=/mnt/disk3 bs=1M count=2048
+
+

Create a target using the file as the backend storage.

+

If necessary, download the Linux SCSI target. Then start the service.

+
yum install scsi-target-utils
+service tgtd start
+
+

You must give an iSCSI Qualified name (IQN), in the format : iqn.yyyy-mm.reversed.domain.name:OptionalIdentifierText

+

where:

+

yyyy-mm represents the 4-digit year and 2-digit month the device was started (for example: 2011-07)

+
tgtadm --lld iscsi --op new --mode target --tid 1 -T iqn.2013-10.com.redhat
+
+

You can look at the target:

+
# tgtadm --lld iscsi --op show --mode conn --tid 1
+
+Session: 11  Connection: 0     Initiator iqn.1994-05.com.redhat:cf75c8d4274d
+
+

Next, add a logical unit to the target

+
tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /mnt/disk3
+
+

Allow any initiator to access the target.

+
tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
+
+

Now it’s time to set up your client.

+

Discover your targets. Note in this example's case, the target IP address is 192.168.1.2

+
iscsiadm --mode discovery --type sendtargets --portal 192.168.1.2
+
+

Login to your target session.

+
iscsiadm --mode node --targetname iqn.2001-04.com.example:storage.disk1.amiens.sys1.xyz --portal 192.168.1.2:3260 --login
+
+

You should have a new SCSI disk. You will see it created in /var/log/messages. You will see it in lsblk.

+

You can send I/O to it:

+
dd if=/dev/zero of=/dev/sda bs=4K count=100
+
+

To tear down your iSCSI connection:

+
iscsiadm  -m node -T iqn.2001-04.com.redhat  -p 172.17.40.21 -u
+
+

Running the iSCSI target on the gluster client

+

You can run the Linux target daemon on the gluster client. The advantage of this setup is that the client runs gluster and enjoys all of gluster's benefits; for example, gluster can "fan out" I/O to different gluster servers. The downside is that the client needs to load and configure gluster. It is better to run gluster on the client if that is possible.

+

References

+

[1] http://www.linuxjournal.com/content/creating-software-backed-iscsi-targets-red-hat-enterprise-linux-6

+

[2] http://www.cyberciti.biz/tips/howto-setup-linux-iscsi-target-sanwith-tgt.html

\ No newline at end of file
diff --git a/Administrator-Guide/Handling-of-users-with-many-groups/index.html b/Administrator-Guide/Handling-of-users-with-many-groups/index.html
new file mode 100644
index 00000000..d6b5e245
--- /dev/null
+++ b/Administrator-Guide/Handling-of-users-with-many-groups/index.html
@@ -0,0 +1,4613 @@

Handling of users that belong to many groups - Gluster Docs

Handling of users that belong to many groups

+

Users can belong to many different (UNIX) groups. These groups are generally +used to allow or deny permissions for executing commands or access to files and +directories.

+

The number of groups a user can belong to depends on the operating system, but +there are also components that support fewer groups. In Gluster, there are +different restrictions on different levels in the stack. The explanations in +this document should clarify which restrictions exist, and how these can be +handled.

+

tl;dr

+
    +
  • if users belong to more than 90 groups, the brick processes need to resolve + the secondary/auxiliary groups with the server.manage-gids volume option
  • +
  • the linux kernels /proc filesystem provides up to 32 groups of a running + process, if this is not sufficient the mount option resolve-gids can be + used
  • +
  • Gluster/NFS needs nfs.server-aux-gids when users accessing a Gluster volume + over NFS belong to more than 16 groups
  • +
+

For all of the above options, the system doing the group resolving must be configured (nsswitch, sssd, ...) so that it can obtain all groups when only a UID is known.

+

Limit in the GlusterFS protocol

+

When a Gluster client does some action on a Gluster volume, the operation is +sent in an RPC packet. This RPC packet contains an header with the credentials +of the user. The server-side receives the RPC packet and uses the credentials +from the RPC header to perform ownership operations and allow/deny checks.

+

The RPC header used by the GlusterFS protocols can contain at most ~93 groups. +In order to pass this limit, the server process (brick) receiving the RPC +procedure can do the resolving of the groups locally, and ignore the (too few) +groups from the RPC header.

+

This requires that the server process can resolve all of the user's groups by the UID of the client process. Most environments that have many groups already use a configuration where users and groups are maintained in a central location. For enterprises it is common to manage users and their groups in LDAP, Active Directory, NIS or similar.

+

To have the groups of a user resolved on the server-side (brick), the volume +option server.manage-gids needs to be set. Once this option is enabled, the +brick processes will not use the groups that the Gluster clients send, but will +use the POSIX getgrouplist() function to fetch them.
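For example, assuming a volume named myvol, the option can be enabled with the regular volume set interface:

gluster volume set myvol server.manage-gids on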

+

Because this is a protocol limitation, all clients, including FUSE mounts, +Gluster/NFS server and libgfapi applications are affected by this.

+

Group limit with FUSE

+

The FUSE client gets the groups of the process that does the I/O by reading the +information from /proc/$pid/status. This file only contains up to 32 groups. +If client-side xlators rely on all groups of a process/user (like posix-acl), +these 32 groups could limit functionality.

+

For that reason a mount option has been added. With the resolve-gids mount +option, the FUSE client calls the POSIX getgrouplist() function instead of +reading /proc/$pid/status.
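A sketch of a FUSE mount using this option, assuming a server named server1 exporting a volume named myvol:

mount -t glusterfs -o resolve-gids server1:/myvol /mnt/myvol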

+

Group limit for NFS

+

The NFS protocol (actually the AUTH_SYS/AUTH_UNIX RPC header) allows up to 16 groups. These are the groups that the NFS-server receives from NFS-clients. Similar to the way the brick processes can resolve the groups on the server-side, the NFS-server can take the UID passed by the NFS-client and use that to resolve the groups. The volume option for that is nfs.server-aux-gids.
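As with the brick-side option, this is a regular volume option; for a hypothetical volume named myvol:

gluster volume set myvol nfs.server-aux-gids on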

+

Other NFS-servers offer options like this too. The Linux kernel nfsd server +uses rpc.mountd --manage-gids. NFS-Ganesha has the configuration option +Manage_Gids.

+

Implications of these solutions

+

All of the mentioned options are disabled by default. One of the reasons is that resolving groups is an expensive operation. In many cases there is no need to support many groups, and enabling the options could cause a performance hit.

+

When groups are resolved, the list is cached. The validity of the cache is configurable. The Gluster processes are not the only ones that cache these groups. nscd or sssd also keep a cache when they handle the getgrouplist() requests. When there are many requests, and querying the groups from a centralized management system takes long, caches might benefit from a longer validity.

+

Another, less obvious difference might be noticed too. Many processes that are written with security in mind reduce the groups that the process can effectively use. This is normally done with the setegids() function. When storage processes do not honour the fewer groups that are effective, and the processes use the UID to resolve all groups of a process, the groups that got dropped with setegids() are added back again. This could lead to permissions that the process should not have.

\ No newline at end of file
diff --git a/Administrator-Guide/Hook-scripts/index.html b/Administrator-Guide/Hook-scripts/index.html
new file mode 100644
index 00000000..aa429f52
--- /dev/null
+++ b/Administrator-Guide/Hook-scripts/index.html
@@ -0,0 +1,4594 @@

Managing GlusterFS Volume Life-Cycle Extensions with Hook Scripts - Gluster Docs

Managing GlusterFS Volume Life-Cycle Extensions with Hook Scripts

+

Glusterfs allows automation of operations by user-written scripts. For every operation, you can execute a pre and a post script.

+

Pre Scripts

+

These scripts are run before the occurrence of the event. You can write a script to automate activities like managing system-wide services. For example, you can write a script to stop exporting the SMB share corresponding to the volume before you stop the volume.

+

Post Scripts

+

These scripts are run after execution of the event. For example, you can write a script to export the SMB share corresponding to the volume after you start the volume.

+

You can run scripts for the following events:

+
    +
  • Creating a volume
  • +
  • Starting a volume
  • +
  • Adding a brick
  • +
  • Removing a brick
  • +
  • Tuning volume options
  • +
  • Stopping a volume
  • +
  • Deleting a volume
  • +
+

Naming Convention

+

While creating the file names of your scripts, you must follow the naming convention followed in your underlying file system like XFS.

+
+

Note: To enable the script, the name of the script must start with an S . Scripts run in lexicographic order of their names.

+
+

Location of Scripts

+

This section provides information on the folders where the scripts must be placed. When you create a trusted storage pool, the following directories are created:

+
    +
  • /var/lib/glusterd/hooks/1/create/
  • +
  • /var/lib/glusterd/hooks/1/delete/
  • +
  • /var/lib/glusterd/hooks/1/start/
  • +
  • /var/lib/glusterd/hooks/1/stop/
  • +
  • /var/lib/glusterd/hooks/1/set/
  • +
  • /var/lib/glusterd/hooks/1/add-brick/
  • +
  • /var/lib/glusterd/hooks/1/remove-brick/
  • +
+

After creating a script, you must ensure to save the script in its respective folder on all the nodes of the trusted storage pool. The location of the script dictates whether the script must be executed before or after an event. Scripts are provided with the command line argument --volname=VOLNAME to specify the volume. Command-specific additional arguments are provided for the following volume operations:

+
Start volume
+    --first=yes, if the volume is the first to be started
+    --first=no, for otherwise
+Stop volume
+    --last=yes, if the volume is to be stopped last.
+    --last=no, for otherwise
+Set volume
+    -o key=value
+    For every key, value is specified in volume set command.
+
+
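As a rough sketch (the script name and log message are made up for illustration), a post-start hook that records which volume was started could be saved as /var/lib/glusterd/hooks/1/start/post/S29logstart.sh on every node:

#!/bin/bash
# Minimal example hook script: logs the volume that was just started.
# glusterd passes arguments such as --volname=VOLNAME and --first=yes|no.

VOLNAME=""
for arg in "$@"; do
    case "$arg" in
        --volname=*) VOLNAME="${arg#--volname=}" ;;
    esac
done

logger -t gluster-hook "volume ${VOLNAME} was started"
exit 0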

Prepackaged Scripts

+

Gluster provides scripts to export Samba (SMB) share when you start a volume and to remove the share when you stop the volume. These scripts are available at: /var/lib/glusterd/hooks/1/start/post and /var/lib/glusterd/hooks/1/stop/pre. By default, the scripts are enabled.

+

When you start a volume using gluster volume start VOLNAME, the S30samba-start.sh script performs the following:

+
    +
  • Adds Samba share configuration details of the volume to the smb.conf file
  • +
  • Mounts the volume through FUSE and adds an entry in /etc/fstab for the same.
  • +
  • Restarts Samba to run with updated configuration
  • +
+

When you stop the volume using gluster volume stop VOLNAME, the S30samba-stop.sh script performs the following:

+
    +
  • Removes the Samba share details of the volume from the smb.conf file
  • +
  • Unmounts the FUSE mount point and removes the corresponding entry in + /etc/fstab
  • +
  • Restarts Samba to run with updated configuration
  • +
\ No newline at end of file
diff --git a/Administrator-Guide/Linux-Kernel-Tuning/index.html b/Administrator-Guide/Linux-Kernel-Tuning/index.html
new file mode 100644
index 00000000..6df32e21
--- /dev/null
+++ b/Administrator-Guide/Linux-Kernel-Tuning/index.html
@@ -0,0 +1,4924 @@

Linux Kernel Tuning - Gluster Docs

Linux Kernel Tuning

+ +

Linux kernel tuning for GlusterFS

+

Every now and then, questions come up here internally and with many +enthusiasts on what Gluster has to say about kernel tuning, if anything.

+

The rarity of kernel tuning is on account of the Linux kernel doing a +pretty good job on most workloads. But there is a flip side to this +design. The Linux kernel historically has eagerly eaten up a lot of RAM, +provided there is some, or driven towards caching as the primary way to +improve performance.

+

For most cases, this is fine, but as the amount of workload increases +over time and clustered load is thrown upon the servers, this turns out +to be troublesome, leading to catastrophic failures of jobs etc.

+

Having had a fair bit of experience looking at large memory systems with heavily loaded regressions, be it CAD, EDA or similar tools, we've sometimes encountered stability problems with Gluster. We had to carefully analyse the memory footprint and amount of disk wait times over days. This gave us a rather remarkable story of disk thrashing, huge iowaits, kernel oopses, disk hangs, etc.

+

This article is the result of my many experiences with tuning options +which were performed on many sites. The tuning not only helped with +overall responsiveness, but it dramatically stabilized the cluster +overall.

+

When it comes to memory tuning the journey starts with the 'VM' +subsystem which has a bizarre number of options, which can cause a lot +of confusion.

+

vm.swappiness

+

vm.swappiness is a tunable kernel parameter that controls how much the +kernel favors swap over RAM. At the source code level, it’s also defined +as the tendency to steal mapped memory. A high swappiness value means +that the kernel will be more apt to unmap mapped pages. A low swappiness +value means the opposite, the kernel will be less apt to unmap mapped +pages. In other words, the higher the vm.swappiness value, the more the +system will swap.

+

High system swapping has very undesirable effects when there are huge +chunks of data being swapped in and out of RAM. Many have argued for the +value to be set high, but in my experience, setting the value to '0' +causes a performance increase.

+

Conforming with the details here - http://lwn.net/Articles/100978/

+

But again these changes should be driven by testing and due diligence +from the user for their own applications. Heavily loaded, streaming apps +should set this value to '0'. By changing this value to '0', the +system's responsiveness improves.
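For example, the value can be changed at runtime with sysctl and persisted in /etc/sysctl.conf; note that the comments further down in this article argue for 10 rather than 0, so treat the exact number as workload-dependent:

sysctl -w vm.swappiness=0
echo "vm.swappiness = 0" >> /etc/sysctl.conf   # persist across reboots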

+

vm.vfs_cache_pressure

+

This option controls the tendency of the kernel to reclaim the memory +which is used for caching of directory and inode objects.

+

At the default value of vfs_cache_pressure=100 the kernel will attempt +to reclaim dentries and inodes at a "fair" rate with respect to +pagecache and swapcache reclaim. Decreasing vfs_cache_pressure causes +the kernel to prefer to retain dentry and inode caches. When +vfs_cache_pressure=0, the kernel will never reclaim dentries and +inodes due to memory pressure and this can easily lead to out-of-memory +conditions. Increasing vfs_cache_pressure beyond 100 causes the kernel +to prefer to reclaim dentries and inodes.

+

With GlusterFS, many users with a lot of storage and many small files +easily end up using a lot of RAM on the server side due to +'inode/dentry' caching, leading to decreased performance when the kernel +keeps crawling through data-structures on a 40GB RAM system. Changing +this value higher than 100 has helped many users to achieve fair caching +and more responsiveness from the kernel.
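For example (the value 200 here is only illustrative; validate it against your own small-file workload):

sysctl -w vm.vfs_cache_pressure=200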

+

vm.dirty_background_ratio

+

vm.dirty_ratio

+

The first of the two (vm.dirty_background_ratio) defines the +percentage of memory that can become dirty before a background flushing +of the pages to disk starts. Until this percentage is reached no pages +are flushed to disk. However when the flushing starts, then it's done in +the background without disrupting any of the running processes in the +foreground.

+

Now the second of the two parameters (vm.dirty_ratio) defines the +percentage of memory which can be occupied by dirty pages before a +forced flush starts. If the percentage of dirty pages reaches this +threshold, then all processes become synchronous, and they are not +allowed to continue until the io operation they have requested is +actually performed and the data is on disk. In cases of high performance +I/O machines, this causes a problem as the data caching is cut away and +all of the processes doing I/O become blocked to wait for I/O. This will +cause a large number of hanging processes, which leads to high load, +which leads to an unstable system and crappy performance.

+

Lowering them from standard values causes everything to be flushed to +disk rather than storing much in RAM. It helps large memory systems, +which would normally flush a 45G-90G pagecache to disk, causing huge +wait times for front-end applications, decreasing overall responsiveness +and interactivity.
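A possible starting point, in line with the discussion above and the comments further below (the exact values are illustrative and should be validated for your workload):

sysctl -w vm.dirty_background_ratio=1
sysctl -w vm.dirty_ratio=20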

+

"1" > /proc/sys/vm/pagecache

+

Page Cache is a disk cache which holds data from files and executable +programs, i.e. pages with actual contents of files or block devices. +Page Cache (disk cache) is used to reduce the number of disk reads. A +value of '1' indicates 1% of the RAM is used for this, so that most of +them are fetched from disk rather than RAM. This value is somewhat fishy +after the above values have been changed. Changing this option is not +necessary, but if you are still paranoid about controlling the +pagecache, this value should help.

+

"deadline" > /sys/block/sdc/queue/scheduler

+

The I/O scheduler is a component of the Linux kernel which decides how +the read and write buffers are to be queued for the underlying device. +Theoretically 'noop' is better with a smart RAID controller because +Linux knows nothing about (physical) disk geometry, therefore it can be +efficient to let the controller, well aware of disk geometry, handle the +requests as soon as possible. But 'deadline' seems to enhance +performance. You can read more about them in the Linux kernel source +documentation: linux/Documentation/block/*iosched.txt . I have also +seen 'read' throughput increase during mixed-operations (many writes).

+

"256" > /sys/block/sdc/queue/nr_requests

+

This is the size of I/O requests which are buffered before they are +communicated to the disk by the Scheduler. The internal queue size of +some controllers (queue_depth) is larger than the I/O scheduler's +nr_requests so that the I/O scheduler doesn't get much of a chance to +properly order and merge the requests. Deadline or CFQ scheduler likes +to have nr_requests to be set 2 times the value of queue_depth, which +is the default for a given controller. Merging the order and requests +helps the scheduler to be more responsive during huge load.

+

echo "16" > /proc/sys/vm/page-cluster

+

page-cluster controls the number of pages which are written to swap in a +single attempt. It defines the swap I/O size, in the above example +adding '16' as per the RAID stripe size of 64k. This wouldn't make sense +after you have used swappiness=0, but if you defined swappiness=10 or +20, then using this value helps when your have a RAID stripe size of +64k.

+

blockdev --setra 4096 /dev/<device> (e.g. sdb, hdc or a dev_mapper device)

+

Default block device settings often result in terrible performance for +many RAID controllers. Adding the above option, which sets read-ahead to +4096 * 512-byte sectors, at least for the streamed copy, increases the +speed, saturating the HD's integrated cache by reading ahead during the +period used by the kernel to prepare I/O. It may put in cached data +which will be requested by the next read. Too much read-ahead may kill +random I/O on huge files if it uses potentially useful drive time or +loads data beyond caches.

+

A few other miscellaneous changes which are recommended at filesystem +level but haven't been tested yet are the following. Make sure that your +filesystem knows about the stripe size and number of disks in the array. +E.g. for a raid5 array with a stripe size of 64K and 6 disks +(effectively 5, because in every stripe-set there is one disk doing +parity). These are built on theoretical assumptions and gathered from +various other blogs/articles provided by RAID experts.

+

-> ext4 fs, 5 disks, 64K stripe, units in 4K blocks

+

mkfs -t ext4 -E stride=$((64/4))

+

-> xfs, 5 disks, 64K stripe, units in 512-byte sectors

+

mkfs -t xfs -d sunit=$((64*2)) -d swidth=$((5*64*2))

+

You may want to consider increasing the above stripe sizes for streaming +large files.

+

WARNING: Above changes are highly subjective with certain types of +applications. This article doesn't guarantee any benefits whatsoever +without prior due diligence from the user for their respective +applications. It should only be applied at the behest of an expected +increase in overall system responsiveness or if it resolves ongoing +issues.

+

More informative and interesting articles/emails/blogs to read

+ +

Last updated by:User:y4m4

+

comment:jdarcy

+

Some additional tuning ideas:

+

* The choice of scheduler is *really* hardware- and workload-dependent, and some schedulers have unique features other than performance. For example, last time I looked cgroups support was limited to the cfq scheduler. Different tests regularly do best on any of cfq, deadline, or noop. The best advice here is not to use a particular scheduler but to try them all for a specific need.

+

* It's worth checking to make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb. I haven't seen this problem for a while, but back when I used to work on Lustre I often saw that these didn't match and performance suffered.

+

* For read-heavy workloads, experimenting with /sys/.../readahead_kb is definitely worthwhile.

+

* Filesystems should be built with -I 512 or similar so that more xattrs can be stored in the inode instead of requiring an extra seek.

+

* Mounting with noatime or relatime is usually good for performance.

+

reply:y4m4

+

Agreed i was about write those parameters you mentioned. I should write another elaborate article on FS changes.

+

y4m4

+

comment:eco

+

This article is the model on which all articles should be written. Detailed information, solid examples and a great selection of references to let readers go more in depth on topics they choose. Great benchmark for others to strive to attain.
Eco, 1 year ago

+

comment:y4m4

+

sysctl -w net.core.rmem_max=4096000 and sysctl -w net.core.wmem_max=4096000 - this helped us to reach 800MB/sec with replicated GlusterFS on 10GbE - thanks to Ben England for these test results.
y4m4

+

comment:bengland

+

After testing Gluster 3.2.4 performance with RHEL6.1, I'd suggest some changes to this article's recommendations:

+

vm.swappiness=10 not 0 -- I think 0 is a bit extreme and might lead to out-of-memory conditions, but 10 will avoid just about all paging/swapping. If you still see swapping, you need to probably focus on restricting dirty pages with vm.dirty_ratio.

+

vfs_cache_pressure > 100 -- why? I thought this was a percentage.

+

vm.pagecache=1 -- some distros (e.g. RHEL6) don't have vm.pagecache parameter.

+

vm.dirty_background_ratio=1 not 10 (kernel default?) -- the kernel default is a bit dependent on choice of Linux distro, but for most workloads it's better to set this parameter very low to cause Linux to push dirty pages out to storage sooner. It means that if dirty pages exceed 1% of RAM then it will start to asynchronously write dirty pages to storage. The only workload where this is really bad: apps that write temp files and then quickly delete them (compiles) -- and you should probably be using local storage for such files anyway.

+

Choice of vm.dirty_ratio is more dependent upon the workload, but in other contexts I have observed that response time fairness and stability is much better if you lower dirty ratio so that it doesn't take more than 2-5 seconds to flush all dirty pages to storage.

+

block device parameters:

+

I'm not aware of any case where cfq scheduler actually helps Gluster server. Unless server I/O threads correspond directly to end-users, I don't see how cfq can help you. Deadline scheduler is a good choice. I/O request queue has to be deep enough to allow scheduler to reorder requests to optimize away disk seeks. The parameters max_sectors_kb and nr_requests are relevant for this. For read-ahead, consider increasing it to the point where you prefetch for longer period of time than a disk seek (on order of 10 msec), so that you can avoid unnecessary disk seeks for multi-stream workloads. This comes at the expense of I/O latency so don't overdo it.

+

network:

+

jumbo frames can increase throughput significantly for 10-GbE networks.

+

Raise net.core.{r,w}mem_max to 540000 from the default of 131071 (not 4 MB as above, my previous recommendation). Gluster 3.2 does a setsockopt() call to use 1/2 MB of memory for TCP socket buffer space.
bengland

+

comment:hjmangalam

+

Thanks very much for noting this info - the descriptions are VERY good.. I'm in the midst of debugging a misbehaving gluster that can't seem to handle small writes over IPoIB and this contains some useful pointers.

+

Some suggestions that might make this more immediately useful:

+

- I'm assuming that this discussion refers to the gluster server nodes, not to the gluster native client nodes, yes? If that's the case, are there also kernel parameters or recommended settings for the client nodes?
- While there are some cases where you mention that a value should be changed to a particular # or %, in a number of cases you advise just increasing/decreasing the values, which for something like a kernel parameter is probably not a useful suggestion. Do I raise it by 10? 10%? 2x? 10x?

+

I also ran across a complementary page, which might be of interest - it explains more of the vm variables, especially as they relate to writing: "Theory of Operation and Tuning for Write-Heavy Loads" and refs therein.
hjmangalam

+

comment:bengland

+

Here are some additional suggestions based on recent testing:
- scaling out number of clients -- you need to increase the size of the ARP tables on the Gluster server if you want to support more than 1K clients mounting a gluster volume. The defaults for RHEL6.3 were too low to support this, we used this:

+

net.ipv4.neigh.default.gc_thresh2 = 2048
net.ipv4.neigh.default.gc_thresh3 = 4096

+

In addition, tunings common to webservers become relevant at this number of clients as well, such as netdev_max_backlog, tcp_fin_timeout, and somaxconn.

+

Bonding mode 6 has been observed to increase replication write performance, I have no experience with bonding mode 4 but it should work if switch is properly configured, other bonding modes are a waste of time.

+

bengland, 3 months ago

\ No newline at end of file
diff --git a/Administrator-Guide/Logging/index.html b/Administrator-Guide/Logging/index.html
new file mode 100644
index 00000000..68a6047d
--- /dev/null
+++ b/Administrator-Guide/Logging/index.html
@@ -0,0 +1,4696 @@

Logging - Gluster Docs

GlusterFS service Logs and locations

+

The following lists the component-, service-, and functionality-based logs of the GlusterFS server. As per the Filesystem Hierarchy Standard (FHS), all the log files are placed in the /var/log directory.

+

Glusterd:

+

glusterd logs are located at /var/log/glusterfs/glusterd.log. One glusterd log file per server. This log file also contains the snapshot and user logs.

+

Gluster cli command:

+

gluster cli logs are located at /var/log/glusterfs/cli.log. Gluster commands executed on a node in a GlusterFS Trusted Storage Pool is logged in /var/log/glusterfs/cmd_history.log.

+

Bricks:

+

Brick logs are located at /var/log/glusterfs/bricks/<path extraction of brick path>.log. One log file per brick on the server.
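The log file name is derived from the brick path, with the slashes replaced by dashes. For example, for a hypothetical brick exported at /data/brick1/gv0, the log could be followed with:

tail -f /var/log/glusterfs/bricks/data-brick1-gv0.log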

+

Rebalance:

+

rebalance logs are located at /var/log/glusterfs/VOLNAME-rebalance.log . One log file per volume on the server.

+

Self-heal daemon:

+

Self-heal daemon logs are located at /var/log/glusterfs/glustershd.log. One log file per server.

+

Quota:

+

/var/log/glusterfs/quotad.log is the log of the quota daemon running on each node.
/var/log/glusterfs/quota-crawl.log: whenever quota is enabled, a file system crawl is performed and the corresponding log is stored in this file.
/var/log/glusterfs/quota-mount-VOLNAME.log: an auxiliary FUSE client is mounted at /VOLNAME of the GlusterFS volume and the corresponding client logs are found in this file. One log file per server and per volume from quota-mount.

+

Gluster NFS:

+

/var/log/glusterfs/nfs.log One log file per server

+

SAMBA Gluster:

+

/var/log/samba/glusterfs-VOLNAME-<ClientIP>.log . If the client mounts this on a glusterFS server node, the actual log file or the mount point may not be found. In such a case, the mount outputs of all the glusterFS type mount operations need to be considered.

+

Ganesha NFS :

+

/var/log/nfs-ganesha.log

+

FUSE Mount:

+

/var/log/glusterfs/<mountpoint path extraction>.log

+

Geo-replication:

+

/var/log/glusterfs/geo-replication/<primary> +/var/log/glusterfs/geo-replication-secondary

+

Gluster volume heal VOLNAME info command:

+

/var/log/glusterfs/glfsheal-VOLNAME.log . One log file per server on which the command is executed.

+

Gluster-swift:

+

/var/log/messages

+

SwiftKrbAuth:

+

/var/log/httpd/error_log

\ No newline at end of file
diff --git a/Administrator-Guide/Managing-Snapshots/index.html b/Administrator-Guide/Managing-Snapshots/index.html
new file mode 100644
index 00000000..d75a5ab9
--- /dev/null
+++ b/Administrator-Guide/Managing-Snapshots/index.html
@@ -0,0 +1,4895 @@

Snapshots - Gluster Docs

Managing GlusterFS Volume Snapshots

+

This section describes how to perform common GlusterFS volume snapshot +management operations

+

Pre-requisites

+

The GlusterFS volume snapshot feature is based on thinly provisioned LVM snapshots. To make use of the snapshot feature, a GlusterFS volume should fulfill the following pre-requisites:

+
    +
  • Each brick should be on an independent thinly provisioned LVM.
  • +
  • The brick LV should not contain any data other than the brick.
  • +
  • None of the bricks should be on a thick-provisioned LV.
  • +
  • The gluster version should be 3.6 or above.
  • +
+

Details of how to create thin volume can be found at the following link. +https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Logical_Volume_Manager_Administration/LV.html#thinly_provisioned_volume_creation

+

A few features of snapshots are:

+

Crash Consistency

+

When a snapshot is taken at a particular point in time, it is guaranteed to be crash consistent. When the snapshot is restored, the data is identical to what it was at the time the snapshot was taken.

+

Online Snapshot

+

When the snapshot is being taken the file system and its associated data +continue to be available for the clients.

+

Barrier

+

During snapshot creation some of the fops are blocked to guarantee crash consistency. There is a default time-out of 2 minutes; if snapshot creation is not complete within that span, the fops are unbarriered. If the unbarrier happens before the snapshot creation is complete, the snapshot creation operation fails. This is to ensure that the snapshot is in a consistent state.

+

Snapshot Management

+

Snapshot creation

+
gluster snapshot create <snapname> <volname> [no-timestamp] [description <description>]
+
+

Creates a snapshot of a GlusterFS volume. User can provide a snap-name and a +description to identify the snap. The description cannot be more than 1024 +characters.

+

The snapshot will be created by appending a timestamp to the user-provided snap name. The user can override this behaviour by giving the no-timestamp flag.

+

NOTE: To be able to take a snapshot, the volume should be present and in the started state. All the bricks used in creating the snapshot have to be online in order to successfully create a snapshot, as the force option is now deprecated.
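For example, assuming a started volume named vol1 (the volume and snapshot names here are placeholders), the following creates a snapshot named snap1 without the timestamp suffix and then lists it:

gluster snapshot create snap1 vol1 no-timestamp description "state before upgrade"
gluster snapshot list vol1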

+

Snapshot clone

+
gluster snapshot clone <clonename> <snapname>
+
+

Creates a clone of a snapshot. Upon successful completion, a new GlusterFS +volume will be created from snapshot. The clone will be a space efficient clone, +i.e, the snapshot and the clone will share the backend disk.

+

NOTE: To be able to take a clone from snapshot, snapshot should be present +and it should be in activated state.

+

Restoring snaps

+
gluster snapshot restore <snapname>
+
+

Restores an already taken snapshot of a GlusterFS volume. +Snapshot restore is an offline activity therefore if the volume is +online (in started state) then the restore operation will fail.

+

Once the snapshot is restored it will not be available in the +list of snapshots.

+

Deleting snaps

+
gluster snapshot delete (all | <snapname> | volume <volname>)
+
+

If snapname is specified, then the mentioned snapshot is deleted. If volname is specified, then all snapshots belonging to that particular volume are deleted. If the keyword all is used, then all snapshots in the system are deleted.

+

Listing of available snaps

+
gluster snapshot list [volname]
+
+

Lists all snapshots taken. If volname is provided, then only the snapshots belonging to that particular volume are listed.

+

Information of available snaps

+
gluster snapshot info [(snapname | volume <volname>)]
+
+

This command gives information such as snapshot name, snapshot UUID, +time at which snapshot was created, and it lists down the snap-volume-name, +number of snapshots already taken and number of snapshots still available +for that particular volume, and the state of the snapshot.

+

Status of snapshots

+
gluster snapshot status [(snapname | volume <volname>)]
+
+

This command gives status of the snapshot. +The details included are snapshot brick path, volume group(LVM details), +status of the snapshot bricks, PID of the bricks, data percentage filled for +that particular volume group to which the snapshots belong to, and total size +of the logical volume.

+

If snapname is specified then status of the mentioned snapshot is displayed. +If volname is specified then status of all snapshots belonging to that volume +is displayed. If both snapname and volname is not specified then status of all +the snapshots present in the system are displayed.

+

Configuring the snapshot behavior

+
snapshot config [volname] ([snap-max-hard-limit <count>] [snap-max-soft-limit <percent>])
+                            | ([auto-delete <enable|disable>])
+                            | ([activate-on-create <enable|disable>])
+
+

Displays and sets the snapshot config values.

+

snapshot config without any keywords displays the snapshot config values of +all volumes in the system. If volname is provided, then the snapshot config +values of that volume is displayed.

+

Snapshot config command along with keywords can be used to change the existing +config values. If volname is provided then config value of that volume is +changed, else it will set/change the system limit.

+

snap-max-soft-limit and auto-delete are global options, that will be +inherited by all volumes in the system and cannot be set to individual volumes.

+

The system limit takes precedence over the volume specific limit.

+

When auto-delete feature is enabled, then upon reaching the soft-limit, +with every successful snapshot creation, the oldest snapshot will be deleted.

+

When auto-delete feature is disabled, then upon reaching the soft-limit, +the user gets a warning with every successful snapshot creation.

+

Upon reaching the hard-limit, further snapshot creations will not be allowed.

+

activate-on-create is disabled by default. If you enable activate-on-create, +then further snapshot will be activated during the time of snapshot creation.

+

Activating a snapshot

+
gluster snapshot activate <snapname>
+
+

Activates the mentioned snapshot.

+

Note: By default the snapshot will not be activated during snapshot creation.

+

Deactivate a snapshot

+
gluster snapshot deactivate <snapname>
+
+

Deactivates the mentioned snapshot.

+

Accessing the snapshot

+

Snapshots can be accessed in 2 ways.

+
    +
  1. +

    Mounting the snapshot:

    +

    The snapshot can be accessed via FUSE mount (only fuse). To do that it has to be +mounted first. A snapshot can be mounted via fuse by below command

    +
    mount -t glusterfs <hostname>:/snaps/<snap-name>/<volume-name> <mount-path>
    +
    +

    i.e. say "host1" is one of the peers. Let "vol" be the volume name and "my-snap" +be the snapshot name. In this case a snapshot can be mounted via this command

    +
    mount -t glusterfs host1:/snaps/my-snap/vol /mnt/snapshot
    +
    +
  2. +
  3. +

    User serviceability:

    +

    Apart from the above method of mounting the snapshot, a list of available +snapshots and the contents of each snapshot can be viewed from any of the mount +points accessing the glusterfs volume (either FUSE or NFS or SMB). For having +user serviceable snapshots, it has to be enabled for a volume first. User +serviceability can be enabled for a volume using the below command.

    +
    gluster volume set <volname> features.uss enable
    +
    +

    Once enabled, from any of the directory (including root of the filesystem) an +access point will be created to the snapshot world. The access point is a hidden +directory cding into which will make the user enter the snapshot world. By +default the hidden directory is ".snaps". Once user serviceability is enabled, +one will be able to cd into .snaps from any directory. Doing "ls" on that +directory shows a list of directories which are nothing but the snapshots +present for that volume. Say if there are 3 snapshots ("snap1", "snap2", +"snap3"), then doing ls in .snaps directory will show those 3 names as the +directory entries. They represent the state of the directory from which .snaps +was entered, at different points in time.

    +

    NOTE: The access to the snapshots are read-only. The snapshot needs to be +activated for it to be accessible inside .snaps directory.

    +

    Also, the name of the hidden directory (or the access point to the snapshot +world) can be changed using the below command.

    +
    gluster volume set <volname> snapshot-directory <new-name>
    +
    +
  4. +
  5. +

    Accessing from windows:

    +

    The glusterfs volumes can be made accessible by windows via samba. (the +glusterfs plugin for samba helps achieve this, without having to re-export +a fuse mounted glusterfs volume). The snapshots of a glusterfs volume can +also be viewed in the windows explorer.

    +

    There are 2 ways:

    +
      +
    • +

      Give the path of the entry point directory + (<hostname><samba-share><directory><entry-point path>) in the run command + window

      +
    • +
    • +

      Go to the samba share via windows explorer. Make hidden files and folders + visible so that in the root of the samba share a folder icon for the entry point + can be seen.

      +
    • +
    +
  6. +
+

NOTE: From Explorer, the snapshot world can be entered via the entry point only from the root of the Samba share. If snapshots have to be seen from subfolders, then the path should be provided in the Run command window.

+

For snapshots to be accessible from Windows, the below two options are used.

+
    +
  1. +

    The glusterfs plugin for Samba should be given the option "snapdir-entry-path" while starting. The option is an indication to glusterfs that Samba is loading it, and the value of the option should be the path that is being used as the share for Windows.

    +

    Ex: Say there is a glusterfs volume, and a directory called "export" from the root of the volume is being used as the Samba share; then Samba has to load glusterfs with this option as well.

    +
     ret = glfs_set_xlator_option(
    +         fs,
    +         "*-snapview-client",
    +         "snapdir-entry-path", "/export"
    + );
    +
    +

    The xlator option "snapdir-entry-path" is not exposed via volume set options, +cannot be changed from CLI. Its an option that has to be provided at the time of +mounting glusterfs or when samba loads glusterfs.

    +
  2. +
  3. +

    The accessibility of snapshots via root of the samba share from windows + is configurable. By default it is turned off. It is a volume set option which can + be changed via CLI.

    +

    gluster volume set <volname> features.show-snapshot-directory <on/off>. By default it is off.

    +
  4. +
+
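As a sketch of providing this option when mounting glusterfs directly (this assumes the glusterfs client's --xlator-option flag; the host, volume and mount-point names are hypothetical):

    glusterfs --volfile-server=host1 --volfile-id=vol \
              --xlator-option="*-snapview-client.snapdir-entry-path=/export" \
              /mnt/glusterfs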

Only when both the above options have been provided (i.e. snapdir-entry-path contains a valid unix path that is exported and the show-snapshot-directory option is set to on) can snapshots be accessed via Windows Explorer.

+

If only the 1st option (i.e. snapdir-entry-path) is set via Samba and the 2nd option (i.e. show-snapshot-directory) is off, then snapshots can be accessed from Windows via the Run command window, but not via Explorer.


Managing GlusterFS Volumes

+

This section describes how to perform common GlusterFS management +operations, including the following:

+ +

+

Configuring Transport Types for a Volume

+

A volume can support one or more transport types for communication between clients and brick processes. There are three supported transport settings: tcp, rdma, and tcp,rdma.

+

To change the supported transport types of a volume, follow the procedure:

+
    +
  1. +

    Unmount the volume on all the clients using the following command:

    +
    umount mount-point
    +
    +
  2. +
  3. +

    Stop the volumes using the following command:

    +
    gluster volume stop <VOLNAME>
    +
    +
  4. +
  5. +

    Change the transport type. For example, to enable both tcp and rdma, execute the following command:

    +
    gluster volume set test-volume config.transport tcp,rdma OR tcp OR rdma
    +
    +
  6. +
  7. +

    Mount the volume on all the clients. For example, to mount using rdma transport, use the following command:

    +
    mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs
    +
    +
  8. +
+

+

Expanding Volumes

+

You can expand volumes, as needed, while the cluster is online and +available. For example, you might want to add a brick to a distributed +volume, thereby increasing the distribution and adding to the capacity +of the GlusterFS volume.

+

Similarly, you might want to add a group of bricks to a distributed +replicated volume, increasing the capacity of the GlusterFS volume.

+
+

Note
+When expanding distributed replicated and distributed dispersed volumes, +you need to add a number of bricks that is a multiple of the replica +or disperse count. For example, to expand a distributed replicated +volume with a replica count of 2, you need to add bricks in multiples +of 2 (such as 4, 6, 8, etc.).

+
+
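As a sketch of that rule, a distributed replicated volume with a replica count of 2 is expanded one replica pair at a time (the server and brick names below are hypothetical):

    # gluster volume add-brick <VOLNAME> server5:/exp5 server6:/exp6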

To expand a volume

+
    +
  1. +

    If they are not already part of the TSP, probe the servers which contain the bricks you + want to add to the volume using the following command:

    +
    gluster peer probe <SERVERNAME>
    +
    +

    For example:

    +
    # gluster peer probe server4
    +Probe successful
    +
    +
  2. +
  3. +

    Add the brick using the following command:

    +
    gluster volume add-brick <VOLNAME> <NEW-BRICK>
    +
    +

    For example:

    +
    # gluster volume add-brick test-volume server4:/exp4
    +Add Brick successful
    +
    +
  4. +
  5. +

    Check the volume information using the following command:

    +
    gluster volume info <VOLNAME>
    +
    +

    The command displays information similar to the following:

    +
    Volume Name: test-volume
    +Type: Distribute
    +Status: Started
    +Number of Bricks: 4
    +Bricks:
    +Brick1: server1:/exp1
    +Brick2: server2:/exp2
    +Brick3: server3:/exp3
    +Brick4: server4:/exp4
    +
    +
  6. +
  7. +

    Rebalance the volume to ensure that files are distributed to the + new brick.

    +

    You can use the rebalance command as described in Rebalancing Volumes

    +
  8. +
+

+

Shrinking Volumes

+

You can shrink volumes, as needed, while the cluster is online and +available. For example, you might need to remove a brick that has become +inaccessible in a distributed volume due to hardware or network failure.

+
+

Note
+Data residing on the brick that you are removing will no longer be +accessible at the Gluster mount point. Note however that only the +configuration information is removed - you can continue to access the +data directly from the brick, as necessary.

+
+

When shrinking distributed replicated and distributed dispersed volumes, you need to remove a number of bricks that is a multiple of the replica or disperse count. For example, to shrink a distributed replicated volume with a replica count of 2, you need to remove bricks in multiples of 2 (such as 4, 6, 8, etc.). In addition, the bricks you are trying to remove must be from the same sub-volume (the same replica or disperse set).
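For example, shrinking a replica-2 volume by one replica set means passing both bricks of that set in a single command (the server and brick names here are hypothetical):

    # gluster volume remove-brick <VOLNAME> server3:/exp3 server4:/exp4 start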

+

Running remove-brick with the start option will automatically trigger a rebalance +operation to migrate data from the removed-bricks to the rest of the volume.

+

To shrink a volume

+
    +
  1. +

    Remove the brick using the following command:

    +
    gluster volume remove-brick <VOLNAME> <BRICKNAME> start
    +
    +

    For example, to remove server2:/exp2:

    +
    # gluster volume remove-brick test-volume server2:/exp2 start
    +volume remove-brick start: success
    +
    +
  2. +
  3. +

    View the status of the remove brick operation using the + following command:

    +
    gluster volume remove-brick <VOLNAME> <BRICKNAME> status
    +
    +

    For example, to view the status of remove brick operation on +server2:/exp2 brick:

    +
    # gluster volume remove-brick test-volume server2:/exp2 status
    +                                Node  Rebalanced-files  size  scanned       status
    +                           ---------  ----------------  ----  -------  -----------
    +617c923e-6450-4065-8e33-865e28d9428f               34   340      162   in progress
    +
    +
  4. +
  5. +

    Once the status displays "completed", commit the remove-brick operation

    +
    gluster volume remove-brick <VOLNAME> <BRICKNAME> commit
    +
    +

    In this example:

    +
    # gluster volume remove-brick test-volume server2:/exp2 commit
    +Removing brick(s) can result in data loss. Do you want to Continue? (y/n) y
    +volume remove-brick commit: success
    +Check the removed bricks to ensure all files are migrated.
    +If files with data are found on the brick path, copy them via a gluster mount point before re-purposing the removed brick.
    +
    +
  6. +
  7. +

    Check the volume information using the following command:

    +
    gluster volume info
    +
    +

    The command displays information similar to the following:

    +
    # gluster volume info
    +Volume Name: test-volume
    +Type: Distribute
    +Status: Started
    +Number of Bricks: 3
    +Bricks:
    +Brick1: server1:/exp1
    +Brick3: server3:/exp3
    +Brick4: server4:/exp4
    +
    +
  8. +
+

+

Replace faulty brick

+

Replacing a brick in a pure distribute volume

+

To replace a brick on a distribute only volume, add the new brick and then remove the brick you want to replace. This will trigger a rebalance operation which will move data from the removed brick.

+
+

NOTE: Replacing a brick using the 'replace-brick' command in gluster is supported only for pure replicate or distributed-replicate volumes.

+
+

Steps to remove brick Server1:/home/gfs/r2_1 and add Server1:/home/gfs/r2_2:

+
    +
  1. +

    Here is the initial volume configuration:

    +
    Volume Name: r2
    +Type: Distribute
    +Volume ID: 25b4e313-7b36-445d-b524-c3daebb91188
    +Status: Started
    +Number of Bricks: 2
    +Transport-type: tcp
    +Bricks:
    +Brick1: Server1:/home/gfs/r2_0
    +Brick2: Server1:/home/gfs/r2_1
    +
    +
  2. +
  3. +

    Here are the files that are present on the mount:

    +
    # ls
    +1  10  2  3  4  5  6  7  8  9
    +
    +
  4. +
  5. +

    Add the new brick - Server1:/home/gfs/r2_2 now:

    +
    # gluster volume add-brick r2 Server1:/home/gfs/r2_2
    +volume add-brick: success
    +
    +
  6. +
  7. +

    Start remove-brick using the following command:

    +
    # gluster volume remove-brick r2 Server1:/home/gfs/r2_1 start
    +volume remove-brick start: success
    +ID: fba0a488-21a4-42b7-8a41-b27ebaa8e5f4
    +
    +
  8. +
  9. +

    Wait until remove-brick status indicates that it is complete.

    +
    # gluster volume remove-brick r2 Server1:/home/gfs/r2_1 status
    +                                Node Rebalanced-files          size       scanned      failures       skipped               status   run time in secs
    +                           ---------      -----------   -----------   -----------   -----------   -----------         ------------     --------------
    +                           localhost                5       20Bytes            15             0             0            completed               0.00
    +
    +
  10. +
  11. +

    Now we can safely remove the old brick, so commit the changes:

    +
    # gluster volume remove-brick r2 Server1:/home/gfs/r2_1 commit
    +Removing brick(s) can result in data loss. Do you want to Continue? (y/n) y
    +volume remove-brick commit: success
    +
    +
  12. +
  13. +

    Here is the new volume configuration.

    +
    Volume Name: r2
    +Type: Distribute
    +Volume ID: 25b4e313-7b36-445d-b524-c3daebb91188
    +Status: Started
    +Number of Bricks: 2
    +Transport-type: tcp
    +Bricks:
    +Brick1: Server1:/home/gfs/r2_0
    +Brick2: Server1:/home/gfs/r2_2
    +
    +
  14. +
  15. +

    Check the contents of the mount:

    +
    # ls
    +1  10  2  3  4  5  6  7  8  9
    +
    +
  16. +
+

Replacing bricks in Replicate/Distributed Replicate volumes

+

This section of the document describes how brick: Server1:/home/gfs/r2_0 is replaced with brick: Server1:/home/gfs/r2_5 in volume r2 with replica count 2.

+
    Volume Name: r2
+    Type: Distributed-Replicate
+    Volume ID: 24a0437a-daa0-4044-8acf-7aa82efd76fd
+    Status: Started
+    Number of Bricks: 2 x 2 = 4
+    Transport-type: tcp
+    Bricks:
+    Brick1: Server1:/home/gfs/r2_0
+    Brick2: Server2:/home/gfs/r2_1
+    Brick3: Server1:/home/gfs/r2_2
+    Brick4: Server2:/home/gfs/r2_3
+
+

Steps:

+
    +
  1. Make sure there is no data in the new brick Server1:/home/gfs/r2_5
  2. +
  3. Check that all the bricks are running. It is okay if the brick that is going to be replaced is down.
  4. +
  5. +

    Replace the brick with 'commit force' option. Please note that other variants of replace-brick command are not supported.

    +
      +
    • +

      Execute replace-brick command

      +

      # gluster volume replace-brick r2 Server1:/home/gfs/r2_0 Server1:/home/gfs/r2_5 commit force
      volume replace-brick: success: replace-brick commit successful

      +
    • +
    • +

      Check that the new brick is now online

      +

      # gluster volume status
      Status of volume: r2
      Gluster process                     Port    Online  Pid
      Brick Server1:/home/gfs/r2_5        49156   Y       5731   <---- new brick is online
      Brick Server2:/home/gfs/r2_1        49153   Y       5354
      Brick Server1:/home/gfs/r2_2        49154   Y       5365
      Brick Server2:/home/gfs/r2_3        49155   Y       5376

      +
    • +
    • +

      Users can track the progress of self-heal using: gluster volume heal [volname] info, or by checking the size of the new brick.

      +
    • +
    • +

      # gluster volume heal <volname> info will show that no heal is required when the data is fully synced to the replaced brick.

      +

      # gluster volume heal r2 info
      Brick Server1:/home/gfs/r2_5
      Number of entries: 0

      Brick Server2:/home/gfs/r2_1
      Number of entries: 0

      Brick Server1:/home/gfs/r2_2
      Number of entries: 0

      Brick Server2:/home/gfs/r2_3
      Number of entries: 0

      +
    • +
    +
  6. +
+

+

Rebalancing Volumes

+

After expanding a volume using the add-brick command, you may need to rebalance the data +among the servers. New directories created after expanding or shrinking +of the volume will be evenly distributed automatically. For all the +existing directories, the distribution can be fixed by rebalancing the +layout and/or data.

+

This section describes how to rebalance GlusterFS volumes in your +storage environment, using the following common scenarios:

+
    +
  • +

    Fix Layout - Fixes the layout to use the new volume topology so that files can + be distributed to newly added nodes.

    +
  • +
  • +

    Fix Layout and Migrate Data - Rebalances volume by fixing the layout + to use the new volume topology and migrating the existing data.

    +
  • +
+

Rebalancing Volume to Fix Layout Changes

+

Fixing the layout is necessary because the layout structure is static +for a given directory. Even after new bricks are added to the volume, newly created +files in existing directories will still be distributed only among the original bricks. +The command gluster volume rebalance <volname> fix-layout start will fix the +layout information so that the files can be created on the newly added bricks. +When this command is issued, all the file stat information which is +already cached will get revalidated.

+

As of GlusterFS 3.6, the assignment of files to bricks will take into account +the sizes of the bricks. For example, a 20TB brick will be assigned twice as +many files as a 10TB brick. In versions before 3.6, the two bricks were +treated as equal regardless of size, and would have been assigned an equal +share of files.

+

A fix-layout rebalance will only fix the layout changes and does not +migrate data. If you want to migrate the existing data, +use gluster volume rebalance <volume> start command to rebalance data among +the servers.

+

To rebalance a volume to fix layout

+
    +
  • Start the rebalance operation on any Gluster server using the + following command:
  • +
+

# gluster volume rebalance <VOLNAME> fix-layout start

+

For example:

+
  # gluster volume rebalance test-volume fix-layout start
+  Starting rebalance on volume test-volume has been successful
+
+

Rebalancing Volume to Fix Layout and Migrate Data

+

After expanding a volume using the add-brick command, you need to rebalance the data among the servers. A remove-brick command will automatically trigger a rebalance.

+

To rebalance a volume to fix layout and migrate the existing data

+
    +
  • Start the rebalance operation on any one of the server using the + following command:
  • +
+

# gluster volume rebalance <VOLNAME> start

+

For example:

+
  # gluster volume rebalance test-volume start
+  Starting rebalancing on volume test-volume has been successful
+
+
    +
  • Start the migration operation forcefully on any one of the servers + using the following command:
  • +
+

# gluster volume rebalance <VOLNAME> start force

+

For example:

+
  # gluster volume rebalance test-volume start force
+  Starting rebalancing on volume test-volume has been successful
+
+

A rebalance operation will attempt to balance the disk usage across nodes, therefore it will skip files where the move would result in a less balanced volume. This leads to link files that are still left behind in the system and may cause performance issues. This behaviour can be overridden with the force argument.

+

Displaying the Status of Rebalance Operation

+

You can display the status information about rebalance volume operation, +as needed.

+
    +
  • Check the status of the rebalance operation, using the following + command:
  • +
+

# gluster volume rebalance <VOLNAME> status

+

For example:

+
  # gluster volume rebalance test-volume status
+                                  Node  Rebalanced-files  size  scanned       status
+                             ---------  ----------------  ----  -------  -----------
+  617c923e-6450-4065-8e33-865e28d9428f               416  1463      312  in progress
+
+

The time to complete the rebalance operation depends on the number + of files on the volume along with the corresponding file sizes. + Continue checking the rebalance status, verifying that the number of + files rebalanced or total files scanned keeps increasing.

+

For example, running the status command again might display a result + similar to the following:

+
  # gluster volume rebalance test-volume status
+                                  Node  Rebalanced-files  size  scanned       status
+                             ---------  ----------------  ----  -------  -----------
+  617c923e-6450-4065-8e33-865e28d9428f               498  1783      378  in progress
+
+

The rebalance status displays the following when the rebalance is + complete:

+
  # gluster volume rebalance test-volume status
+                                  Node  Rebalanced-files  size  scanned       status
+                             ---------  ----------------  ----  -------  -----------
+  617c923e-6450-4065-8e33-865e28d9428f               502  1873      334   completed
+
+

Stopping an Ongoing Rebalance Operation

+

You can stop the rebalance operation, if needed.

+
    +
  • Stop the rebalance operation using the following command:
  • +
+

# gluster volume rebalance <VOLNAME> stop

+

For example:

+
  # gluster volume rebalance test-volume stop
+                                  Node  Rebalanced-files  size  scanned       status
+                             ---------  ----------------  ----  -------  -----------
+  617c923e-6450-4065-8e33-865e28d9428f               59   590      244       stopped
+  Stopped rebalance process on volume test-volume
+
+

+

Stopping Volumes

+
    +
  1. +

    Stop the volume using the following command:

    +

    # gluster volume stop <VOLNAME>

    +

    For example, to stop test-volume:

    +
    # gluster volume stop test-volume
    +Stopping volume will make its data inaccessible. Do you want to continue? (y/n)
    +
    +
  2. +
  3. +

    Enter y to confirm the operation. The output of the command + displays the following:

    +
    Stopping volume test-volume has been successful
    +
    +
  4. +
+

+

Deleting Volumes

+
    +
  1. +

    Delete the volume using the following command:

    +

    # gluster volume delete <VOLNAME>

    +

    For example, to delete test-volume:

    +
    # gluster volume delete test-volume
    +Deleting volume will erase all information about the volume. Do you want to continue? (y/n)
    +
    +
  2. +
  3. +

    Enter y to confirm the operation. The command displays the + following:

    +
    Deleting volume test-volume has been successful
    +
    +
  4. +
+

+

Triggering Self-Heal on Replicate

+

In the replicate module, previously you had to manually trigger a self-heal when a brick went offline and came back online, to bring all the replicas in sync. Now the pro-active self-heal daemon runs in the background, diagnoses issues and automatically initiates self-healing every 10 minutes on the files which require healing.

+

You can view the list of files that need healing, the list of files +which are currently/previously healed, list of files which are in +split-brain state, and you can manually trigger self-heal on the entire +volume or only on the files which need healing.

+
    +
  • Trigger self-heal only on the files which require healing:
  • +
+

# gluster volume heal <VOLNAME>

+

For example, to trigger self-heal on the files of test-volume which require healing:

+
  # gluster volume heal test-volume
+  Heal operation on volume test-volume has been successful
+
+
    +
  • Trigger self-heal on all the files of a volume:
  • +
+

# gluster volume heal <VOLNAME> full

+

For example, to trigger self-heal on all the files of test-volume:

+
  # gluster volume heal test-volume full
+  Heal operation on volume test-volume has been successful
+
+
    +
  • View the list of files that need healing:
  • +
+

# gluster volume heal <VOLNAME> info

+

For example, to view the list of files on test-volume that need healing:

+
  # gluster volume heal test-volume info
+  Brick server1:/gfs/test-volume_0
+  Number of entries: 0
+
+  Brick server2:/gfs/test-volume_1
+  Number of entries: 101
+  /95.txt
+  /32.txt
+  /66.txt
+  /35.txt
+  /18.txt
+  /26.txt
+  /47.txt
+  /55.txt
+  /85.txt
+  ...
+
+
    +
  • View the list of files that are self-healed:
  • +
+

# gluster volume heal <VOLNAME> info healed

+

For example, to view the list of files on test-volume that are + self-healed:

+
  # gluster volume heal test-volume info healed
+  Brick Server1:/gfs/test-volume_0
+  Number of entries: 0
+
+  Brick Server2:/gfs/test-volume_1
+  Number of entries: 69
+  /99.txt
+  /93.txt
+  /76.txt
+  /11.txt
+  /27.txt
+  /64.txt
+  /80.txt
+  /19.txt
+  /41.txt
+  /29.txt
+  /37.txt
+  /46.txt
+  ...
+
+
    +
  • View the list of files of a particular volume on which the self-heal + failed:
  • +
+

# gluster volume heal <VOLNAME> info failed

+

For example, to view the list of files of test-volume that are not + self-healed:

+
  # gluster volume heal test-volume info failed
+  Brick Server1:/gfs/test-volume_0
+  Number of entries: 0
+
+  Brick Server2:/gfs/test-volume_3
+  Number of entries: 72
+  /90.txt
+  /95.txt
+  /77.txt
+  /71.txt
+  /87.txt
+  /24.txt
+  ...
+
+
    +
  • View the list of files of a particular volume which are in + split-brain state:
  • +
+

# gluster volume heal <VOLNAME> info split-brain

+

For example, to view the list of files of test-volume which are in + split-brain state:

+
  # gluster volume heal test-volume info split-brain
+  Brick Server1:/gfs/test-volume_2
+  Number of entries: 12
+  /83.txt
+  /28.txt
+  /69.txt
+  ...
+
+  Brick Server2:/gfs/test-volume_3
+  Number of entries: 12
+  /83.txt
+  /28.txt
+  /69.txt
+  ...
+
+

+

Non Uniform File Allocation

+

NUFA translator or Non Uniform File Access translator is designed for giving higher preference +to a local drive when used in a HPC type of environment. It can be applied to Distribute and Replica translators; +in the latter case it ensures that one copy is local if space permits.

+

When a client on a server creates files, the files are allocated to a brick in the volume based on the file name. +This allocation may not be ideal, as there is higher latency and unnecessary network traffic for read/write operations +to a non-local brick or export directory. NUFA ensures that the files are created in the local export directory +of the server, and as a result, reduces latency and conserves bandwidth for that server accessing that file. +This can also be useful for applications running on mount points on the storage server.

+

If the local brick runs out of space or reaches the minimum disk free limit, instead of allocating files +to the local brick, NUFA distributes files to other bricks in the same volume if there is +space available on those bricks.

+

NUFA should be enabled before creating any data in the volume.

+

Use the following steps to enable NUFA:

+
    +
  • decide which group is going to be used for managing the settings of your volume; the commands below assume it is <GROUPNAME>
  • +
  • define cluster.nufa enable for this group:
  • +
+

# echo "cluster.nufa=enable" | tee -a /var/lib/glusterd/groups/<GROUPNAME>

+
    +
  • add your volume to the group
  • +
+

# gluster volume set <VOLNAME> group <GROUPNAME>

+
    +
  • verify whether the nufa setting was set properly
  • +
+

# gluster volume info

+
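Putting the steps above together, a minimal end-to-end sketch could look like this (the group name nufa-group and the volume name vol are hypothetical):

    # echo "cluster.nufa=enable" | tee -a /var/lib/glusterd/groups/nufa-group
    # gluster volume set vol group nufa-group
    # gluster volume info vol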

Important

+

NUFA is supported under the following conditions:

+
    +
  • Volumes with only one brick per server.
  • +
  • For use with a FUSE client. NUFA is not supported with NFS or SMB.
  • +
  • A client that is mounting a NUFA-enabled volume must be present within the trusted storage pool.
  • +
+

The NUFA scheduler also exists, for use with the Unify translator; see below.

+
volume bricks
+  type cluster/nufa
+  option local-volume-name brick1
+  subvolumes brick1 brick2 brick3 brick4 brick5 brick6 brick7
+end-volume
+
+
NUFA additional options
+
    +
  • lookup-unhashed
  • +
+

This is an advanced option where files are looked up in all subvolumes if they are missing on the subvolume matching the hash value of the filename. The default is on.

+
    +
  • local-volume-name
  • +
+

The volume name to consider local and prefer file creations on. The default is to search for a volume matching the hostname of the system.

+
    +
  • subvolumes
  • +
+

This option lists the subvolumes that are part of this 'cluster/nufa' volume. This translator requires more than one subvolume.

+

BitRot Detection

+

With BitRot detection in Gluster, it's possible to identify "insidious" types of disk errors where data is silently corrupted with no indication from the disk to the storage software layer that an error has occurred. This also helps in catching "backend" tinkering with bricks (where data is directly manipulated on the bricks without going through FUSE, NFS or any other access protocol).

+

BitRot detection is disabled by default and needs to be enabled to make use of the other sub-commands.

+
    +
  1. To enable bitrot detection for a given volume :
  2. +
+

# gluster volume bitrot <VOLNAME> enable

+

and similarly to disable bitrot use:

+

# gluster volume bitrot <VOLNAME> disable

+
+

Note
+Enabling bitrot spawns the Signer & Scrubber daemons per node. The Signer is responsible for signing objects (calculating a checksum for each file) and the Scrubber verifies the calculated checksum against the object's data.

+
+
    +
  1. +

    Scrubber daemon has three (3) throttling modes that adjust the rate at which objects are verified.

    +
     # volume bitrot <VOLNAME> scrub-throttle lazy
    + # volume bitrot <VOLNAME> scrub-throttle normal
    + # volume bitrot <VOLNAME> scrub-throttle aggressive
    +
    +
  2. +
  3. +

    By default scrubber scrubs the filesystem biweekly. It's possible to tune it to scrub + based on predefined frequency such as monthly, etc. This can be done as shown below:

    +
     # volume bitrot <VOLNAME> scrub-frequency daily
    + # volume bitrot <VOLNAME> scrub-frequency weekly
    + # volume bitrot <VOLNAME> scrub-frequency biweekly
    + # volume bitrot <VOLNAME> scrub-frequency monthly
    +
    +
  4. +
+
+

NOTE: Daily scrubbing would not be available with GA release.

+
+
    +
  1. Scrubber daemon can be paused and later resumed when required. This can be done as + shown below:
  2. +
+

# volume bitrot <VOLNAME> scrub pause

+

and to resume scrubbing:

+

# volume bitrot <VOLNAME> scrub resume

+
+
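Depending on the GlusterFS release, the scrubber's progress can also be queried; the following is a sketch, and the exact sub-command may differ between versions:

    # gluster volume bitrot <VOLNAME> scrub status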

Note
+Signing cannot be paused (and resumed) and would always be active as long as +bitrot is enabled for that particular volume.

+

Mandatory Locks

+

Support for mandatory locks inside GlusterFS does not converge all by itself to what Linux kernel provides to user space file systems. Here we enforce core mandatory lock semantics with and without the help of file mode bits. Please read through the design specification which explains the whole concept behind the mandatory locks implementation done for GlusterFS.

+

Implications and Usage

+

By default, mandatory locking will be disabled for a volume and a volume set options is available to configure volume to operate under 3 different mandatory locking modes.

+

Volume Option

+
gluster volume set <VOLNAME> locks.mandatory-locking <off / file / forced / optimal>
+
+

off - Disable mandatory locking for specified volume.
+file - Enable Linux kernel style mandatory locking semantics with the help of mode bits (not well tested)
+forced - Check for conflicting byte range locks for every data modifying operation in a volume
+optimal - Combinational mode where POSIX clients can live with their advisory lock semantics which will still honour the mandatory locks acquired by other clients like SMB.

+

Note:- Please refer the design doc for more information on these key values.

+

Points to be remembered

+
    +
  • Valid key values available with mandatory-locking volume set option are taken into effect only after a subsequent start/restart of the volume.
  • +
  • Due to some outstanding issues, it is recommended to turn off the performance translators in order to have the complete functionality of mandatory-locks when volume is configured in any one of the above described mandatory-locking modes. Please see the 'Known issue' section below for more details.
  • +
+
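For example, to switch a volume (hypothetically named repvol) to the optimal mode and restart it so that the new mode takes effect, as noted above:

    # gluster volume set repvol locks.mandatory-locking optimal
    # gluster volume stop repvol
    # gluster volume start repvol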

Known issues

+
    +
  • Since the whole logic of mandatory-locks are implemented within the locks translator loaded at the server side, early success returned to fops like open, read, write to upper/application layer by performance translators residing at the client side will impact the intended functionality of mandatory-locks. One such issue is being tracked in the following bugzilla report:
  • +
+

https://bugzilla.redhat.com/show_bug.cgi?id=1194546

+
    +
  • There is a possible race window uncovered with respect to mandatory locks and an ongoing read/write operation. For more details refer the bug report given below:
  • +
+

https://bugzilla.redhat.com/show_bug.cgi?id=1287099


Monitoring your GlusterFS Workload

+

You can monitor GlusterFS volumes on different parameters. Monitoring volumes helps in capacity planning and performance tuning tasks of the GlusterFS volume. Using this information, you can identify and troubleshoot issues.

+

You can use Volume Top and Profile commands to view the performance and +identify bottlenecks/hotspots of each brick of a volume. This helps +system administrators to get vital performance information whenever +performance needs to be probed.

+

You can also perform statedump of the brick processes and nfs server +process of a volume, and also view volume status and volume information.

+

Running GlusterFS Volume Profile Command

+

GlusterFS Volume Profile command provides an interface to get the +per-brick I/O information for each File Operation (FOP) of a volume. The +per brick information helps in identifying bottlenecks in the storage +system.

+

This section describes how to run GlusterFS Volume Profile command by +performing the following operations:

+ +

+

Start Profiling

+

You must start the Profiling to view the File Operation information for +each brick.

+

To start profiling, use following command:

+

# gluster volume profile <VOLNAME> start

+

For example, to start profiling on test-volume:

+
# gluster volume profile test-volume start
+Profiling started on test-volume
+
+

When profiling on the volume is started, the following additional +options are displayed in the Volume Info:

+
diagnostics.count-fop-hits: on
+diagnostics.latency-measurement: on
+
+
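These settings can be confirmed, for example, by filtering the volume info output (a simple shell sketch using the test-volume example above):

    # gluster volume info test-volume | grep diagnostics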

+

Displaying the I/O Information

+

You can view the I/O information of each brick by using the following command:

+

# gluster volume profile <VOLNAME> info

+

For example, to see the I/O information on test-volume:

+
# gluster volume profile test-volume info
+Brick: Test:/export/2
+Cumulative Stats:
+
+Block                     1b+           32b+           64b+
+Size:
+       Read:                0              0              0
+       Write:             908             28              8
+
+Block                   128b+           256b+         512b+
+Size:
+       Read:                0               6             4
+       Write:               5              23            16
+
+Block                  1024b+          2048b+        4096b+
+Size:
+       Read:                 0              52           17
+       Write:               15             120          846
+
+Block                   8192b+         16384b+      32768b+
+Size:
+       Read:                52               8           34
+       Write:              234             134          286
+
+Block                                  65536b+     131072b+
+Size:
+       Read:                               118          622
+       Write:                             1341          594
+
+
+%-latency  Avg-      Min-       Max-       calls     Fop
+          latency   Latency    Latency
+___________________________________________________________
+4.82      1132.28   21.00      800970.00   4575    WRITE
+5.70       156.47    9.00      665085.00   39163   READDIRP
+11.35      315.02    9.00     1433947.00   38698   LOOKUP
+11.88     1729.34   21.00     2569638.00    7382   FXATTROP
+47.35   104235.02 2485.00     7789367.00     488   FSYNC
+
+------------------
+
+------------------
+
+Duration     : 335
+
+BytesRead    : 94505058
+
+BytesWritten : 195571980
+
+

+

Stop Profiling

+

You can stop profiling the volume, if you do not need profiling +information anymore.

+

Stop profiling using the following command:

+
# gluster volume profile <VOLNAME> stop
+
+

For example, to stop profiling on test-volume:

+
# gluster volume profile test-volume stop
+
+Profiling stopped on test-volume
+
+

Running GlusterFS Volume TOP Command

+

GlusterFS Volume Top command allows you to view the glusterfs bricks' performance metrics like read, write, file open calls, file read calls, file write calls, directory open calls, and directory read calls. The top command displays up to 100 results.

+

This section describes how to run and view the results for the following +GlusterFS Top commands:

+ +

+

Viewing Open fd Count and Maximum fd Count

+

You can view both the current open fd count (the list of files that are currently the most opened and the count) on the brick and the maximum open fd count (the count of files that are currently open and the count of the maximum number of files opened at any given point of time, since the servers came up). If the brick name is not specified, then the open fd metrics of all the bricks belonging to the volume will be displayed.

+
    +
  • View open fd count and maximum fd count using the following command:
  • +
+

# gluster volume top <VOLNAME> open [brick <BRICK-NAME>] [list-cnt <cnt>]

+

For example, to view open fd count and maximum fd count on brick + server:/export of test-volume and list top 10 open calls:

+

# gluster volume top test-volume open brick server:/export/ list-cnt 10

+

Brick: server:/export/dir1

+

Current open fd's: 34 Max open fd's: 209

+
               ==========Open file stats========
+
+  open            file name
+  call count
+
+  2               /clients/client0/~dmtmp/PARADOX/
+                  COURSES.DB
+
+  11              /clients/client0/~dmtmp/PARADOX/
+                  ENROLL.DB
+
+  11              /clients/client0/~dmtmp/PARADOX/
+                  STUDENTS.DB
+
+  10              /clients/client0/~dmtmp/PWRPNT/
+                  TIPS.PPT
+
+  10              /clients/client0/~dmtmp/PWRPNT/
+                  PCBENCHM.PPT
+
+  9               /clients/client7/~dmtmp/PARADOX/
+                  STUDENTS.DB
+
+  9               /clients/client1/~dmtmp/PARADOX/
+                  STUDENTS.DB
+
+  9               /clients/client2/~dmtmp/PARADOX/
+                  STUDENTS.DB
+
+  9               /clients/client0/~dmtmp/PARADOX/
+                  STUDENTS.DB
+
+  9               /clients/client8/~dmtmp/PARADOX/
+                  STUDENTS.DB
+
+

+

Viewing Highest File Read Calls

+

You can view highest read calls on each brick. If brick name is not +specified, then by default, list of 100 files will be displayed.

+
    +
  • View highest file Read calls using the following command:
  • +
+

# gluster volume top <VOLNAME> read [brick <BRICK-NAME>] [list-cnt <cnt>]

+

For example, to view highest Read calls on brick server:/export of + test-volume:

+

# gluster volume top test-volume read brick server:/export/ list-cnt 10

+

Brick: server:/export/dir1

+
            ==========Read file stats========
+
+  read              filename
+  call count
+
+  116              /clients/client0/~dmtmp/SEED/LARGE.FIL
+
+  64               /clients/client0/~dmtmp/SEED/MEDIUM.FIL
+
+  54               /clients/client2/~dmtmp/SEED/LARGE.FIL
+
+  54               /clients/client6/~dmtmp/SEED/LARGE.FIL
+
+  54               /clients/client5/~dmtmp/SEED/LARGE.FIL
+
+  54               /clients/client0/~dmtmp/SEED/LARGE.FIL
+
+  54               /clients/client3/~dmtmp/SEED/LARGE.FIL
+
+  54               /clients/client4/~dmtmp/SEED/LARGE.FIL
+
+  54               /clients/client9/~dmtmp/SEED/LARGE.FIL
+
+  54               /clients/client8/~dmtmp/SEED/LARGE.FIL
+
+

+

Viewing Highest File Write Calls

+

You can view list of files which has highest file write calls on each +brick. If brick name is not specified, then by default, list of 100 +files will be displayed.

+
    +
  • View highest file Write calls using the following command:
  • +
+

# gluster volume top <VOLNAME> write [brick <BRICK-NAME>] [list-cnt <cnt>]

+

For example, to view highest Write calls on brick server:/export of + test-volume:

+

# gluster volume top test-volume write brick server:/export/ list-cnt 10

+

Brick: server:/export/dir1

+
                 ==========Write file stats========
+  write call count   filename
+
+  83                /clients/client0/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client7/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client1/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client2/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client0/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client8/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client5/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client4/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client6/~dmtmp/SEED/LARGE.FIL
+
+  59                /clients/client3/~dmtmp/SEED/LARGE.FIL
+
+

+

Viewing Highest Open Calls on Directories

+

You can view list of files which has highest open calls on directories +of each brick. If brick name is not specified, then the metrics of all +the bricks belonging to that volume will be displayed.

+
    +
  • View list of open calls on each directory using the following + command:
  • +
+

# gluster volume top <VOLNAME> opendir [brick <BRICK-NAME>] [list-cnt <cnt>]

+

For example, to view open calls on brick server:/export/ of + test-volume:

+

# gluster volume top test-volume opendir brick server:/export/ list-cnt 10

+

Brick: server:/export/dir1

+
           ==========Directory open stats========
+
+  Opendir count     directory name
+
+  1001              /clients/client0/~dmtmp
+
+  454               /clients/client8/~dmtmp
+
+  454               /clients/client2/~dmtmp
+
+  454               /clients/client6/~dmtmp
+
+  454               /clients/client5/~dmtmp
+
+  454               /clients/client9/~dmtmp
+
+  443               /clients/client0/~dmtmp/PARADOX
+
+  408               /clients/client1/~dmtmp
+
+  408               /clients/client7/~dmtmp
+
+  402               /clients/client4/~dmtmp
+
+

+

Viewing Highest Read Calls on Directory

+

You can view list of files which has highest directory read calls on +each brick. If brick name is not specified, then the metrics of all the +bricks belonging to that volume will be displayed.

+
    +
  • View list of highest directory read calls on each brick using the + following command:
  • +
+

# gluster volume top test-volume readdir [brick BRICK] [list-cnt {0..100}]

+

For example, to view highest directory read calls on brick + server:/export of test-volume:

+

# gluster volume top test-volume readdir brick server:/export list-cnt 10

+

Brick:

+
  ==========Directory readdirp stats========
+
+  readdirp count           directory name
+
+  1996                    /clients/client0/~dmtmp
+
+  1083                    /clients/client0/~dmtmp/PARADOX
+
+  904                     /clients/client8/~dmtmp
+
+  904                     /clients/client2/~dmtmp
+
+  904                     /clients/client6/~dmtmp
+
+  904                     /clients/client5/~dmtmp
+
+  904                     /clients/client9/~dmtmp
+
+  812                     /clients/client1/~dmtmp
+
+  812                     /clients/client7/~dmtmp
+
+  800                     /clients/client4/~dmtmp
+
+

+

Viewing List of Read Performance on each Brick

+

You can view the read throughput of files on each brick. If brick name +is not specified, then the metrics of all the bricks belonging to that +volume will be displayed. The output will be the read throughput.

+
       ==========Read throughput file stats========
+
+read         filename                         Time
+through
+put(MBp
+s)
+
+2570.00    /clients/client0/~dmtmp/PWRPNT/      -2011-01-31
+           TRIDOTS.POT                      15:38:36.894610
+2570.00    /clients/client0/~dmtmp/PWRPNT/      -2011-01-31
+           PCBENCHM.PPT                     15:38:39.815310
+2383.00    /clients/client2/~dmtmp/SEED/        -2011-01-31
+           MEDIUM.FIL                       15:52:53.631499
+
+2340.00    /clients/client0/~dmtmp/SEED/        -2011-01-31
+           MEDIUM.FIL                       15:38:36.926198
+
+2299.00   /clients/client0/~dmtmp/SEED/         -2011-01-31
+          LARGE.FIL                         15:38:36.930445
+
+2259.00   /clients/client0/~dmtmp/PARADOX/      -2011-01-31
+          COURSES.X04                       15:38:40.549919
+
+2221.00   /clients/client0/~dmtmp/PARADOX/      -2011-01-31
+          STUDENTS.VAL                      15:52:53.298766
+
+2221.00   /clients/client3/~dmtmp/SEED/         -2011-01-31
+          COURSES.DB                        15:39:11.776780
+
+2184.00   /clients/client3/~dmtmp/SEED/         -2011-01-31
+          MEDIUM.FIL                        15:39:10.251764
+
+2184.00   /clients/client5/~dmtmp/WORD/         -2011-01-31
+          BASEMACH.DOC                      15:39:09.336572
+
+

This command will initiate a dd for the specified count and block size and measure the corresponding throughput.

+
    +
  • View list of read performance on each brick using the following + command:
  • +
+

# gluster volume top <VOLNAME> read-perf [bs <blk-size> count <count>] [brick <BRICK-NAME>] [list-cnt <cnt>]

+

For example, to view read performance on brick server:/export/ of + test-volume, 256 block size of count 1, and list count 10:

+

# gluster volume top test-volume read-perf bs 256 count 1 brick server:/export/ list-cnt 10

+

Brick: server:/export/dir1 256 bytes (256 B) copied, Throughput: 4.1 MB/s

+
         ==========Read throughput file stats========
+
+  read         filename                         Time
+  through
+  put(MBp
+  s)
+
+  2912.00   /clients/client0/~dmtmp/PWRPNT/    -2011-01-31
+             TRIDOTS.POT                   15:38:36.896486
+
+  2570.00   /clients/client0/~dmtmp/PWRPNT/    -2011-01-31
+             PCBENCHM.PPT                  15:38:39.815310
+
+  2383.00   /clients/client2/~dmtmp/SEED/      -2011-01-31
+             MEDIUM.FIL                    15:52:53.631499
+
+  2340.00   /clients/client0/~dmtmp/SEED/      -2011-01-31
+             MEDIUM.FIL                    15:38:36.926198
+
+  2299.00   /clients/client0/~dmtmp/SEED/      -2011-01-31
+             LARGE.FIL                     15:38:36.930445
+
+  2259.00  /clients/client0/~dmtmp/PARADOX/    -2011-01-31
+            COURSES.X04                    15:38:40.549919
+
+  2221.00  /clients/client9/~dmtmp/PARADOX/    -2011-01-31
+            STUDENTS.VAL                   15:52:53.298766
+
+  2221.00  /clients/client8/~dmtmp/PARADOX/    -2011-01-31
+           COURSES.DB                      15:39:11.776780
+
+  2184.00  /clients/client3/~dmtmp/SEED/       -2011-01-31
+            MEDIUM.FIL                     15:39:10.251764
+
+  2184.00  /clients/client5/~dmtmp/WORD/       -2011-01-31
+           BASEMACH.DOC                    15:39:09.336572
+
+

+

Viewing List of Write Performance on each Brick

+

You can view list of write throughput of files on each brick. If brick +name is not specified, then the metrics of all the bricks belonging to +that volume will be displayed. The output will be the write throughput.

+

This command will initiate a dd for the specified count and block size and measure the corresponding throughput. To view the list of write performance on each brick:

+
    +
  • View list of write performance on each brick using the following + command:
  • +
+

# gluster volume top <VOLNAME> write-perf [bs <blk-size> count <count>] [brick <BRICK-NAME>] [list-cnt <cnt>]

+

For example, to view write performance on brick server:/export/ of + test-volume, 256 block size of count 1, and list count 10:

+

# gluster volume top test-volume write-perf bs 256 count 1 brick server:/export/ list-cnt 10

+

Brick: server:/export/dir1

+

256 bytes (256 B) copied, Throughput: 2.8 MB/s

+
         ==========Write throughput file stats========
+
+  write                filename                 Time
+  throughput
+  (MBps)
+
+  1170.00    /clients/client0/~dmtmp/SEED/     -2011-01-31
+             SMALL.FIL                     15:39:09.171494
+
+  1008.00    /clients/client6/~dmtmp/SEED/     -2011-01-31
+             LARGE.FIL                      15:39:09.73189
+
+  949.00    /clients/client0/~dmtmp/SEED/      -2011-01-31
+            MEDIUM.FIL                     15:38:36.927426
+
+  936.00   /clients/client0/~dmtmp/SEED/       -2011-01-31
+           LARGE.FIL                        15:38:36.933177
+  897.00   /clients/client5/~dmtmp/SEED/       -2011-01-31
+           MEDIUM.FIL                       15:39:09.33628
+
+  897.00   /clients/client6/~dmtmp/SEED/       -2011-01-31
+           MEDIUM.FIL                       15:39:09.27713
+
+  885.00   /clients/client0/~dmtmp/SEED/       -2011-01-31
+            SMALL.FIL                      15:38:36.924271
+
+  528.00   /clients/client5/~dmtmp/SEED/       -2011-01-31
+           LARGE.FIL                        15:39:09.81893
+
+  516.00   /clients/client6/~dmtmp/ACCESS/    -2011-01-31
+           FASTENER.MDB                    15:39:01.797317
+
+

Displaying Volume Information

+

You can display information about a specific volume, or all volumes, as +needed.

+
    +
  • Display information about a specific volume using the following + command:
  • +
+

# gluster volume info VOLNAME

+

For example, to display information about test-volume:

+
  # gluster volume info test-volume
+  Volume Name: test-volume
+  Type: Distribute
+  Status: Created
+  Number of Bricks: 4
+  Bricks:
+  Brick1: server1:/exp1
+  Brick2: server2:/exp2
+  Brick3: server3:/exp3
+  Brick4: server4:/exp4
+
+
    +
  • Display information about all volumes using the following command:
  • +
+

# gluster volume info all

+
  # gluster volume info all
+
+  Volume Name: test-volume
+  Type: Distribute
+  Status: Created
+  Number of Bricks: 4
+  Bricks:
+  Brick1: server1:/exp1
+  Brick2: server2:/exp2
+  Brick3: server3:/exp3
+  Brick4: server4:/exp4
+
+  Volume Name: mirror
+  Type: Distributed-Replicate
+  Status: Started
+  Number of Bricks: 2 X 2 = 4
+  Bricks:
+  Brick1: server1:/brick1
+  Brick2: server2:/brick2
+  Brick3: server3:/brick3
+  Brick4: server4:/brick4
+
+  Volume Name: Vol
+  Type: Distribute
+  Status: Started
+  Number of Bricks: 1
+  Bricks:
+  Brick: server:/brick6
+
+

Performing Statedump on a Volume

+

Statedump is a mechanism through which you can get details of all internal variables and the state of the glusterfs process at the time of issuing the command. You can perform statedumps of the brick processes and the NFS server process of a volume using the statedump command. The following options can be used to determine what information is to be dumped:

+
    +
  • +

    mem - Dumps the memory usage and memory pool details of the + bricks.

    +
  • +
  • +

    iobuf - Dumps iobuf details of the bricks.

    +
  • +
  • +

    priv - Dumps private information of loaded translators.

    +
  • +
  • +

    callpool - Dumps the pending calls of the volume.

    +
  • +
  • +

    fd - Dumps the open fd tables of the volume.

    +
  • +
  • +

    inode - Dumps the inode tables of the volume.

    +
  • +
+

To display volume statedump

+
    +
  • Display statedump of a volume or NFS server using the following + command:
  • +
+

# gluster volume statedump <VOLNAME> [nfs] [all|mem|iobuf|callpool|priv|fd|inode]

+

For example, to display statedump of test-volume:

+
  # gluster volume statedump test-volume
+  Volume statedump successful
+
+

The statedump files are created on the brick servers in the /tmp directory or in the directory set using the server.statedump-path volume option. The naming convention of the dump file is <brick-path>.<brick-pid>.dump.
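For instance, assuming the default location and the naming convention above, the generated dump files on a brick server could be listed as follows (the file name shown is purely illustrative):

    # ls /tmp/*.dump
    /tmp/export-1.22445.dump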

+
    +
  • By default, the output of the statedump is stored in a /tmp/<brickname.PID.dump> file on that particular server. Change the directory of the statedump file using the following command:
  • +
+

# gluster volume set <VOLNAME> server.statedump-path <path>

+

For example, to change the location of the statedump file of + test-volume:

+
  # gluster volume set test-volume server.statedump-path /usr/local/var/log/glusterfs/dumps/
+  Set volume successful
+
+

You can view the changed path of the statedump file using the + following command:

+

# gluster volume info

+

Displaying Volume Status

+

You can display the status information about a specific volume, brick or +all volumes, as needed. Status information can be used to understand the +current status of the brick, nfs processes, and overall file system. +Status information can also be used to monitor and debug the volume +information. You can view status of the volume along with the following +details:

+
    +
  • +

    detail - Displays additional information about the bricks.

    +
  • +
  • +

    clients - Displays the list of clients connected to the volume.

    +
  • +
  • +

    mem - Displays the memory usage and memory pool details of the + bricks.

    +
  • +
  • +

    inode - Displays the inode tables of the volume.

    +
  • +
  • +

    fd - Displays the open fd (file descriptors) tables of the + volume.

    +
  • +
  • +

    callpool - Displays the pending calls of the volume.

    +
  • +
+

To display volume status

+
    +
  • Display information about a specific volume using the following + command:
  • +
+

# gluster volume status [all | <VOLNAME> [<BRICK-NAME>]] [detail|clients|mem|inode|fd|callpool]

+

For example, to display information about test-volume:

+
  # gluster volume status test-volume
+  STATUS OF VOLUME: test-volume
+  BRICK                           PORT   ONLINE   PID
+  --------------------------------------------------------
+  arch:/export/1                  24009   Y       22445
+  --------------------------------------------------------
+  arch:/export/2                  24010   Y       22450
+
+
    +
  • Display information about all volumes using the following command:
  • +
+

# gluster volume status all

+
  # gluster volume status all
+  STATUS OF VOLUME: volume-test
+  BRICK                           PORT   ONLINE   PID
+  --------------------------------------------------------
+  arch:/export/4                  24010   Y       22455
+
+  STATUS OF VOLUME: test-volume
+  BRICK                           PORT   ONLINE   PID
+  --------------------------------------------------------
+  arch:/export/1                  24009   Y       22445
+  --------------------------------------------------------
+  arch:/export/2                  24010   Y       22450
+
+
    +
  • Display additional information about the bricks using the following + command:
  • +
+

# gluster volume status <VOLNAME> detail

+

For example, to display additional information about the bricks of + test-volume:

+
  # gluster volume status test-volume detail
+  STATUS OF VOLUME: test-volume
+  -------------------------------------------
+  Brick                : arch:/export/1
+  Port                 : 24009
+  Online               : Y
+  Pid                  : 16977
+  File System          : rootfs
+  Device               : rootfs
+  Mount Options        : rw
+  Disk Space Free      : 13.8GB
+  Total Disk Space     : 46.5GB
+  Inode Size           : N/A
+  Inode Count          : N/A
+  Free Inodes          : N/A
+
+  Number of Bricks: 1
+  Bricks:
+  Brick: server:/brick6
+
+
    +
  • Display the list of clients accessing the volumes using the + following command:
  • +
+

# gluster volume status test-volume clients

+

For example, to display the list of clients connected to + test-volume:

+
  # gluster volume status test-volume clients
+  Brick : arch:/export/1
+  Clients connected : 2
+  Hostname          Bytes Read   BytesWritten
+  --------          ---------    ------------
+  127.0.0.1:1013    776          676
+  127.0.0.1:1012    50440        51200
+
+
    +
  • Display the memory usage and memory pool details of the bricks using + the following command:
  • +
+

# gluster volume status test-volume mem

For example, to display the memory usage and memory pool details of the bricks of test-volume:

  Memory status for volume : test-volume
  ----------------------------------------------
  Brick : arch:/export/1
  Mallinfo
  --------
  Arena    : 434176
  Ordblks  : 2
  Smblks   : 0
  Hblks    : 12
  Hblkhd   : 40861696
  Usmblks  : 0
  Fsmblks  : 0
  Uordblks : 332416
  Fordblks : 101760
  Keepcost : 100400

  Mempool Stats
  -------------
  Name                               HotCount ColdCount PaddedSizeof AllocCount MaxAlloc
  ----                               -------- --------- ------------ ---------- --------
  test-volume-server:fd_t                0     16384           92         57        5
  test-volume-server:dentry_t           59       965           84         59       59
  test-volume-server:inode_t            60       964          148         60       60
  test-volume-server:rpcsvc_request_t    0       525         6372        351        2
  glusterfs:struct saved_frame           0      4096          124          2        2
  glusterfs:struct rpc_req               0      4096         2236          2        2
  glusterfs:rpcsvc_request_t             1       524         6372          2        1
  glusterfs:call_stub_t                  0      1024         1220        288        1
  glusterfs:call_stack_t                 0      8192         2084        290        2
  glusterfs:call_frame_t                 0     16384          172       1728        6

  • Display the inode tables of the volume using the following command:

# gluster volume status <VOLNAME> inode

For example, to display the inode tables of the test-volume:

  # gluster volume status test-volume inode
  inode tables for volume test-volume
  ----------------------------------------------
  Brick : arch:/export/1
  Active inodes:
  GFID                                            Lookups            Ref   IA type
  ----                                            -------            ---   -------
  6f3fe173-e07a-4209-abb6-484091d75499                  1              9         2
  370d35d7-657e-44dc-bac4-d6dd800ec3d3                  1              1         2

  LRU inodes:
  GFID                                            Lookups            Ref   IA type
  ----                                            -------            ---   -------
  80f98abe-cdcf-4c1d-b917-ae564cf55763                  1              0         1
  3a58973d-d549-4ea6-9977-9aa218f233de                  1              0         1
  2ce0197d-87a9-451b-9094-9baa38121155                  1              0         2

  • Display the open fd tables of the volume using the following command:

# gluster volume status <VOLNAME> fd

For example, to display the open fd tables of the test-volume:

  # gluster volume status test-volume fd

  FD tables for volume test-volume
  ----------------------------------------------
  Brick : arch:/export/1
  Connection 1:
  RefCount = 0  MaxFDs = 128  FirstFree = 4
  FD Entry            PID                 RefCount            Flags
  --------            ---                 --------            -----
  0                   26311               1                   2
  1                   26310               3                   2
  2                   26310               1                   2
  3                   26311               3                   2

  Connection 2:
  RefCount = 0  MaxFDs = 128  FirstFree = 0
  No open fds

  Connection 3:
  RefCount = 0  MaxFDs = 128  FirstFree = 0
  No open fds

  • Display the pending calls of the volume using the following command:

# gluster volume status <VOLNAME> callpool

Each call has a call stack containing call frames.

For example, to display the pending calls of test-volume:

  # gluster volume status test-volume callpool

  Pending calls for volume test-volume
  ----------------------------------------------
  Brick : arch:/export/1
  Pending calls: 2
  Call Stack1
   UID    : 0
   GID    : 0
   PID    : 26338
   Unique : 192138
   Frames : 7
   Frame 1
    Ref Count   = 1
    Translator  = test-volume-server
    Completed   = No
   Frame 2
    Ref Count   = 0
    Translator  = test-volume-posix
    Completed   = No
    Parent      = test-volume-access-control
    Wind From   = default_fsync
    Wind To     = FIRST_CHILD(this)->fops->fsync
   Frame 3
    Ref Count   = 1
    Translator  = test-volume-access-control
    Completed   = No
    Parent      = repl-locks
    Wind From   = default_fsync
    Wind To     = FIRST_CHILD(this)->fops->fsync
   Frame 4
    Ref Count   = 1
    Translator  = test-volume-locks
    Completed   = No
    Parent      = test-volume-io-threads
    Wind From   = iot_fsync_wrapper
    Wind To     = FIRST_CHILD (this)->fops->fsync
   Frame 5
    Ref Count   = 1
    Translator  = test-volume-io-threads
    Completed   = No
    Parent      = test-volume-marker
    Wind From   = default_fsync
    Wind To     = FIRST_CHILD(this)->fops->fsync
   Frame 6
    Ref Count   = 1
    Translator  = test-volume-marker
    Completed   = No
    Parent      = /export/1
    Wind From   = io_stats_fsync
    Wind To     = FIRST_CHILD(this)->fops->fsync
   Frame 7
    Ref Count   = 1
    Translator  = /export/1
    Completed   = No
    Parent      = test-volume-server
    Wind From   = server_fsync_resume
    Wind To     = bound_xl->fops->fsync

Configuring NFS-Ganesha over GlusterFS

NFS-Ganesha is a user-space file server for the NFS protocol with support for NFSv3, v4, v4.1, and pNFS. It provides a FUSE-compatible File System Abstraction Layer (FSAL) to allow file-system developers to plug in their storage mechanism and access it from any NFS client. NFS-Ganesha can access the FUSE filesystems directly through its FSAL without copying any data to or from the kernel, thus potentially improving response times.

Installing nfs-ganesha

Gluster RPMs (>= 3.10)

glusterfs-server
glusterfs-api
glusterfs-ganesha

Ganesha RPMs (>= 2.5)

nfs-ganesha
nfs-ganesha-gluster

Start NFS-Ganesha manually

  • To start NFS-Ganesha manually, use the command: service nfs-ganesha start

    where:
    /var/log/ganesha.log is the default log file for the ganesha process.
    /etc/ganesha/ganesha.conf is the default configuration file.
    NIV_EVENT is the default log level.

  • If you want to run ganesha in a preferred mode, execute the following command:

    ganesha.nfsd -f <location_of_nfs-ganesha.conf_file> -L <location_of_log_file> -N <log_level>

    For example:

    # ganesha.nfsd -f nfs-ganesha.conf -L nfs-ganesha.log -N NIV_DEBUG

    where:
    nfs-ganesha.log is the log file for the ganesha.nfsd process.
    nfs-ganesha.conf is the configuration file.
    NIV_DEBUG is the log level.

  • By default, the export list for the server will be null.

    Note: include the following parameters in the ganesha configuration file for exporting gluster volumes:

    NFS_Core_Param {
            # Use supplied name other than IP in NSM operations
            NSM_Use_Caller_Name = true;
            # Copy lock states into "/var/lib/nfs/ganesha" dir
            Clustered = false;
            # Use a non-privileged port for RQuota
            Rquota_Port = 875;
            # Please note: add the below option for Mac clients
            #Enable_RQUOTA = false;
    }

Step-by-step procedure to export a GlusterFS volume via NFS-Ganesha

Step 1:

To export any GlusterFS volume or directory inside a volume, create the EXPORT block for each of those entries in an export configuration file. The following parameters are required to export any entry.

  • cat export.conf

  EXPORT{
      Export_Id = 1 ;   # Export ID unique to each export
      Path = "volume_path";  # Path of the volume to be exported. Eg: "/test_volume"

      FSAL {
          name = GLUSTER;
          hostname = "10.xx.xx.xx";  # IP of one of the nodes in the trusted pool
          volume = "volume_name";  # Volume name. Eg: "test_volume"
      }

      Access_type = RW;    # Access permissions
      Squash = No_root_squash; # To enable/disable root squashing
      Disable_ACL = TRUE;  # To enable/disable ACL
      Pseudo = "pseudo_path";  # NFSv4 pseudo path for this export. Eg: "/test_volume_pseudo"
      Protocols = "3","4" ;    # NFS protocols supported
      Transports = "UDP","TCP" ; # Transport protocols supported
      SecType = "sys";     # Security flavors supported
  }

Step 2:

Now include the export configuration file in the ganesha configuration file (/etc/ganesha/ganesha.conf by default). This can be done by adding the line below at the end of the file.

  • %include "<path of export configuration>"

  Note:
  The above two steps can be done with the following script:
  #/usr/libexec/ganesha/create-export-ganesha.sh <ganesha directory> on <volume name>
  By default the ganesha directory is "/etc/ganesha".
  This will create the export configuration file in <ganesha directory>/exports/export.<volume name>.conf
  It will also add the above entry to ganesha.conf.

Step 3:

Turn on features.cache-invalidation for that volume:

  • gluster volume set <volume name> features.cache-invalidation on

Step 4:

dbus commands are used to export/unexport the volume.

  • Export:

    dbus-send --system --print-reply --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport string:<ganesha directory>/exports/export.<volume name>.conf string:"EXPORT(Path=/<volume name>)"

  • Unexport:

    dbus-send --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.RemoveExport uint16:<export id>

  Note:
  Step 4 can be performed via the following script:
  #/usr/libexec/ganesha/dbus-send.sh <ganesha directory> [on|off] <volume name>

The above scripts (mentioned in step 2 and step 4) are available in the glusterfs 3.10 rpms.

You can download it from here.

Step 5:

  • To check if the volume is exported, run:
  • showmount -e localhost
  • Or else use the following dbus command:
  • dbus-send --type=method_call --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.ShowExports
  • To see clients:
  • dbus-send --type=method_call --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/ClientMgr org.ganesha.nfsd.clientmgr.ShowClients

Using Highly Available Active-Active NFS-Ganesha And GlusterFS cli

Please note: currently the HA solution for nfs-ganesha is available in 3.10. From 3.12 onwards, HA will be handled by a different project known as storhaug, which is under development.

In a highly available active-active environment, if an NFS-Ganesha server that is connected to an NFS client running a particular application crashes, the application/NFS client is seamlessly connected to another NFS-Ganesha server without any administrative intervention. The cluster is maintained using Pacemaker and Corosync. Pacemaker acts as a resource manager and Corosync provides the communication layer of the cluster. Data coherency across the multi-head NFS-Ganesha servers in the cluster is achieved using the UPCALL infrastructure. UPCALL infrastructure is a generic and extensible framework that sends notifications to the respective glusterfs clients (in this case the NFS-Ganesha server) in case of any changes detected in the backend filesystem.

The Highly Available cluster is configured in the following three stages:

Creating the ganesha-ha.conf file

The ganesha-ha.conf.example file is created in /etc/ganesha when Gluster Storage is installed. Rename the file to ganesha-ha.conf and make the changes as suggested in the following sample ganesha-ha.conf file:

  # Name of the HA cluster created. Must be unique within the subnet.
  HA_NAME="ganesha-ha-360"
  # The subset of nodes of the Gluster Trusted Pool that form the ganesha HA cluster.
  # Hostname is specified.
  HA_CLUSTER_NODES="server1,server2,..."
  #HA_CLUSTER_NODES="server1.lab.redhat.com,server2.lab.redhat.com,..."
  # Virtual IPs for each of the nodes specified above.
  VIP_server1="10.0.2.1"
  VIP_server2="10.0.2.2"

Configuring NFS-Ganesha using gluster CLI

The HA cluster can be set up or torn down using the gluster CLI. It can also export and unexport specific volumes. For more information, see the section Configuring NFS-Ganesha using gluster CLI.

Modifying the HA cluster using the ganesha-ha.sh script

After the cluster is created, any further modification can be done using the ganesha-ha.sh script. For more information, see the section Modifying the HA cluster using the ganesha-ha.sh script.

Step-by-step guide

Configuring NFS-Ganesha using Gluster CLI

Pre-requisites to run NFS-Ganesha

Ensure that the following pre-requisites are taken into consideration before you run NFS-Ganesha in your environment:

  • A Gluster Storage volume must be available for export and NFS-Ganesha rpms are installed on all the nodes.

  • IPv6 must be enabled on the host interface which is used by the NFS-Ganesha daemon. To enable IPv6 support, perform the following steps:

    • Comment or remove the line options ipv6 disable=1 in the /etc/modprobe.d/ipv6.conf file.
    • Reboot the system.

  • Ensure that all the nodes in the cluster are DNS resolvable. For example, you can populate the /etc/hosts with the details of all the nodes in the cluster.

  • Disable and stop the NetworkManager service.

  • Enable and start the network service on all machines.

  • Create and mount a gluster shared volume.

    • gluster volume set all cluster.enable-shared-storage enable

  • Install Pacemaker and Corosync on all machines.

  • Set the cluster auth password on all the machines.

  • Passwordless ssh needs to be enabled on all the HA nodes. Follow these steps:

    • On one (primary) node in the cluster, run:

      • ssh-keygen -f /var/lib/glusterd/nfs/secret.pem

    • Deploy the pubkey ~root/.ssh/authorized_keys on all nodes, run:

      • ssh-copy-id -i /var/lib/glusterd/nfs/secret.pem.pub root@$node

    • Copy the keys to all nodes in the cluster, run:

      • scp /var/lib/glusterd/nfs/secret.* $node:/var/lib/glusterd/nfs/

  • Create a directory named "nfs-ganesha" in the shared storage path and create ganesha.conf & ganesha-ha.conf in it (from glusterfs 3.9 onwards).

Configuring the HA Cluster

To set up the HA cluster, enable NFS-Ganesha by executing the following command:

  gluster nfs-ganesha enable

To tear down the HA cluster, execute the following command:

  gluster nfs-ganesha disable

Note:
The enable command performs the following:
* create a symlink ganesha.conf in /etc/ganesha using the ganesha.conf in shared storage
* start the nfs-ganesha process on nodes that are part of the ganesha cluster
* set up the HA cluster
The disable command does the reverse of enable.
Also, if gluster nfs-ganesha [enable/disable] fails, please check the following logs:
* /var/log/glusterfs/glusterd.log
* /var/log/messages (and grep for pcs commands)
* /var/log/pcsd/pcsd.log
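Once enabled, a quick sanity check from any HA node looks roughly like this (a sketch: pcs is the Pacemaker CLI assumed by this setup, and a volume only appears in showmount after ganesha.enable is turned on for it):

  gluster nfs-ganesha enable
  pcs status                  # cluster resources and virtual IPs should show as Started
  showmount -e localhost      # lists volumes currently exported by nfs-ganesha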

Exporting Volumes through NFS-Ganesha using cli

To export a Red Hat Gluster Storage volume, execute the following command:

  gluster volume set <volname> ganesha.enable on

To unexport a Red Hat Gluster Storage volume, execute the following command:

  gluster volume set <volname> ganesha.enable off

This command unexports the Red Hat Gluster Storage volume without affecting other exports.

To verify the status of the volume set options, follow the guidelines mentioned below:

  • Check if NFS-Ganesha is started by executing the following command:
  • ps aux | grep ganesha.nfsd
  • Check if the volume is exported.
  • showmount -e localhost

The logs of the ganesha.nfsd daemon are written to /var/log/ganesha.log. Check the log file if you notice any unexpected behavior.


Modifying the HA cluster using the ganesha-ha.sh script


To modify the existing HA cluster and to change the default values of the exports use the ganesha-ha.sh script located at /usr/libexec/ganesha/.


Adding a node to the cluster


Before adding a node to the cluster, ensure all the prerequisites mentioned in the section Pre-requisites to run NFS-Ganesha are met. To add a node to the cluster, execute the following command on any of the nodes in the existing NFS-Ganesha cluster:

  #./ganesha-ha.sh --add <HA_CONF_DIR> <HOSTNAME> <NODE-VIP>

  where,
  HA_CONF_DIR: The directory path containing the ganesha-ha.conf file.
  HOSTNAME: Hostname of the new node to be added.
  NODE-VIP: Virtual IP of the new node to be added.

Deleting a node in the cluster


To delete a node from the cluster, execute the following command on any of the nodes in the existing NFS-Ganesha cluster:

  #./ganesha-ha.sh --delete <HA_CONF_DIR> <HOSTNAME>

  where,
  HA_CONF_DIR: The directory path containing the ganesha-ha.conf file.
  HOSTNAME: Hostname of the node to be deleted.

Modifying the default export configuration


To modify the default export configurations perform the following steps on any of the nodes in the existing ganesha cluster:

  • Edit/add the required fields in the corresponding export file located at /etc/ganesha/exports.

  • Execute the following command:

      #./ganesha-ha.sh --refresh-config <HA_CONFDIR> <volname>

      where,
      HA_CONF_DIR: The directory path containing the ganesha-ha.conf file.
      volname: The name of the volume whose export configuration has to be changed.

    Note: The export ID must not be changed.

Configure ganesha ha cluster outside of gluster nodes

+

Currently, ganesha HA cluster creation is tightly integrated with glusterd. So the user needs to create another trusted storage pool using the ganesha nodes, then create the ganesha HA cluster using the above-mentioned steps up to executing "gluster nfs-ganesha enable". Exporting/unexporting should be performed without using the glusterd cli (follow the manual steps; before performing step 4, replace localhost with the required hostname/ip in "hostname=localhost;" in the export configuration file).


Configuring Gluster volume for pNFS


The Parallel Network File System (pNFS) is part of the NFS v4.1 protocol that allows computing clients to access storage devices directly and in parallel. The pNFS cluster consists of an MDS (Meta-Data Server) and DS (Data Servers). The client sends all the read/write requests directly to the DS, and all other operations are handled by the MDS.


Step by step guide

  • Turn on features.cache-invalidation for the volume.

    gluster v set <volname> features.cache-invalidation on

  • Select one of the nodes in the cluster as MDS and configure it by adding the following block to the ganesha configuration file:

    GLUSTER
    {
     PNFS_MDS = true;
    }

  • Manually start NFS-Ganesha in every node in the cluster.

  • Check whether the volume is exported via nfs-ganesha in all the nodes.

    showmount -e localhost

  • Mount the volume using NFS version 4.1 protocol with the ip of the MDS:

    mount -t nfs4 -o minorversion=1 <ip of MDS>:<volume name> <mount path>

Points to be Noted

  • The current architecture supports only a single MDS and multiple DS. The server with which the client mounts will act as the MDS, and all servers including the MDS can act as DS.

  • Currently, HA is not supported for pNFS (more specifically, the MDS). Although it is configurable, consistency is guaranteed across the cluster.

  • If any of the DS goes down, then the MDS will handle those I/Os.

  • Hereafter, all subsequent NFS clients need to use the same server for mounting that volume via pNFS, i.e. more than one MDS for a volume is not preferred.

  • pNFS support is only tested with distributed, replicated, or distribute-replicate volumes.

  • It is tested and verified with RHEL 6.5, Fedora 20, and Fedora 21 NFS clients. It is always better to use the latest NFS clients.

Network Configurations Techniques


Bonding best practices

Bonded network interfaces incorporate multiple physical interfaces into a single logical bonded interface, with a single IP address. An N-way bonded interface can survive loss of N-1 physical interfaces, and performance can be improved in some cases.

When to bond?

  • Need high availability for the network link
  • Workload: sequential access to large files (most time spent reading/writing)
  • Network throughput limit of client/server << storage throughput limit
  • 1 GbE (almost always)
  • 10-Gbps links or faster -- for writes, replication doubles the load on the network and replicas are usually on different peers to which the client can transmit in parallel.
  • LIMITATION: Bonding mode 6 doesn't improve throughput if network peers are not on the same VLAN.

How to configure (see the sample bonding configuration after this list)

  • Bonding-howto
  • Best bonding mode for a Gluster client is mode 6 (balance-alb); this allows the client to transmit writes in parallel on separate NICs much of the time. A peak throughput of 750 MB/s on writes from a single client was observed with bonding mode 6 on 2 10-GbE NICs with jumbo frames. That's 1.5 GB/s of network traffic.
  • Another way to balance both transmit and receive traffic is bonding mode 4 (802.3ad), but this requires switch configuration (trunking commands).
  • Still another way to load balance is bonding mode 2 (balance-xor) with option "xmit_hash_policy=layer3+4". Bonding modes 6 and 2 will not improve single-connection throughput, but improve aggregate throughput across all connections.
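For a concrete starting point, here is a minimal sketch of mode 6 (balance-alb) ifcfg files on a RHEL/CentOS-style system; the interface names (em1/em2) and addresses are illustrative assumptions, not values from this guide:

  # /etc/sysconfig/network-scripts/ifcfg-bond0
  DEVICE=bond0
  TYPE=Bond
  BONDING_MASTER=yes
  BONDING_OPTS="mode=balance-alb miimon=100"
  IPADDR=192.168.1.10
  PREFIX=24
  ONBOOT=yes
  BOOTPROTO=none

  # /etc/sysconfig/network-scripts/ifcfg-em1 (and similarly ifcfg-em2)
  DEVICE=em1
  MASTER=bond0
  SLAVE=yes
  ONBOOT=yes
  BOOTPROTO=none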
Jumbo frames

Jumbo frames are Ethernet (or Infiniband) frames with size greater than the default of 1500 bytes (Infiniband default is around 2000 bytes). Increasing frame size reduces load on operating system and hardware, which must process interrupts and protocol messages per frame.

When to configure?

  • Any network faster than 1-GbE
  • Workload is sequential large-file reads/writes
  • LIMITATION: Requires that all network switches in the VLAN be configured to handle jumbo frames; do not configure otherwise.

How to configure? (see the sample interface settings after this list)

  • Edit the network interface file at /etc/sysconfig/network-scripts/ifcfg-your-interface
  • Ethernet (on ixgbe driver): add an "MTU=9000" (MTU means "maximum transfer unit") record to the network interface file
  • Infiniband (on mlx4 driver): add "CONNECTED_MODE=yes" and "MTU=65520" records to the network interface file
  • ifdown your-interface; ifup your-interface
  • Test with "ping -s 16384 other-host-on-VLAN"
  • The switch requires a max frame size larger than the MTU because of protocol headers, usually 9216 bytes
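As a minimal sketch of the Ethernet case (the interface name eth2 is an assumption; the MTU value comes from the list above):

  # /etc/sysconfig/network-scripts/ifcfg-eth2
  DEVICE=eth2
  MTU=9000
  ONBOOT=yes

  ifdown eth2 && ifup eth2
  ip link show eth2                              # should now report mtu 9000
  ping -M do -s 8972 -c 3 other-host-on-VLAN     # 8972 = 9000 minus 28 bytes of IP/ICMP headers

The ping with -M do (don't fragment) only succeeds if every switch port on the path accepts jumbo frames.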
Configuring a backend network for storage

This method lets you add network capacity for multi-protocol sites by segregating traffic for different protocols on different network interfaces. This method can lower latency and improve throughput. For example, this method can keep self-heal and rebalancing traffic from competing with non-Gluster client traffic for a network interface, and will better support multi-stream I/O.

When to configure?

  • For non-Gluster services such as NFS, Swift (REST), CIFS being provided on Gluster servers. It will not help Gluster clients (external nodes with Gluster mountpoints on them).
  • Network port is over-utilized.

How to configure? (see the sketch after this list)

  • Most network cards have multiple ports on them -- make port 1 the non-Gluster port and port 2 the Gluster port.
  • Separate Gluster ports onto a separate VLAN from non-Gluster ports, to simplify configuration.
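One common way to realize this split, sketched under the assumption that each server has a second NIC on a dedicated storage VLAN (the -storage hostnames and addresses below are made up for illustration): give every server a backend hostname and build the trusted pool with those names, so brick and peer traffic stays on the backend interfaces while NFS/Swift/CIFS clients keep using the front-end addresses.

  # /etc/hosts on every node
  192.168.50.11  server1-storage
  192.168.50.12  server2-storage

  # peer probe and brick paths use the backend names
  gluster peer probe server2-storage
  gluster volume create myvol replica 2 \
      server1-storage:/bricks/brick1 server2-storage:/bricks/brick1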

SwiftOnFile

The SwiftOnFile project enables a GlusterFS volume to be used as the backend for OpenStack Swift, a distributed object store. This allows objects PUT over Swift's RESTful API to be accessed as files over a filesystem interface and vice versa, i.e. files created over a filesystem interface (NFS/FUSE/native) can be accessed as objects over Swift's RESTful API.

The SwiftOnFile project was formerly known as gluster-swift, and before that as UFO (Unified File and Object). More information about SwiftOnFile can be found here. There are differences in the working of the gluster-swift (now obsolete) and swiftonfile projects. The older gluster-swift code and relevant documentation can be found in the icehouse branch of the swiftonfile repo.


SwiftOnFile vs gluster-swift

| Gluster-Swift | SwiftOnFile |
| --- | --- |
| One GlusterFS volume maps to and stores only one Swift account. Mountpoint Hierarchy: container/object | One GlusterFS volume or XFS partition can have multiple accounts. Mountpoint Hierarchy: acc/container/object |
| Over-rides account server, container server and object server. We need to keep in sync with upstream Swift and often may need code changes or workarounds to support new Swift features | Implements only object-server. Very less need to catch-up to Swift as new features at proxy, container and account level would very likely be compatible with SwiftOnFile as it's just a storage policy. |
| Does not use DBs for accounts and container. A container listing involves a filesystem crawl. A HEAD on account/container gives inaccurate or stale results without FS crawl. | Uses Swift's DBs to store account and container information. An account or container listing does not involve FS crawl. Accurate info on HEAD to account/container – ability to support account quotas. |
| GET on a container and account lists actual files in filesystem. | GET on a container and account only lists objects PUT over Swift. Files created over filesystem interface do not appear in container and object listings. |
| Standalone deployment required and does not integrate with existing Swift cluster. | Integrates with any existing Swift deployment as a Storage Policy. |

Gluster performance testing

+

Once you have created a Gluster volume, you need to verify that it has adequate performance for your application, and if it does not, you need a way to isolate the root cause of the problem.

+

There are two kinds of workloads:

+
  • synthetic - run a test program such as the ones below
  • application - run an existing application

Profiling tools

+

Ideally it's best to use the actual application that you want to run on Gluster, but applications often don't tell the sysadmin much about where the performance problems are, particularly latency (response-time) problems. So there are non-invasive profiling tools built into Gluster that can measure performance as seen by the application, without changing the application. Gluster profiling methods at present are based on the io-stats translator, and include:

+
  • client-side profiling - instrument a Gluster mountpoint or libgfapi process to sample profiling data. In this case, the io-stats translator is at the "top" of the translator stack, so the profile data truly represents what the application (or FUSE mountpoint) is asking Gluster to do. For example, a single application write is counted once as a WRITE FOP (file operation) call, and the latency for that WRITE FOP includes latency of the data replication done by the AFR translator lower in the stack.

  • server-side profiling - this is done using the "gluster volume profile" command (and "gluster volume top" can be used to identify particular hot files in use as well). Server-side profiling can measure the throughput of an entire Gluster volume over time, and can measure server-side latencies. However, it does not incorporate network or client-side latencies. It is also hard to infer application behavior because of client-side translators that alter the I/O workload (examples: erasure coding, cache tiering).

In short, use client-side profiling for understanding "why is my application unresponsive"? and use server-side profiling for understanding how busy your Gluster volume is, what kind of workload is being applied to it (i.e. is it mostly-read? is it small-file?), and how well the I/O load is spread across the volume.

+

client-side profiling

+

To run client-side profiling,

+
  • gluster volume profile your-volume start
  • setfattr -n trusted.io-stats-dump -v io-stats-pre.txt /your/mountpoint

This will generate the specified file (/var/run/gluster/io-stats-pre.txt) on the client. A script like gvp-client.sh can automate collection of this data.
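A minimal sketch of a before/after collection around a workload (the volume name, mountpoint, and the run-your-workload step are placeholders; the dump lands in the /var/run/gluster directory mentioned above):

  VOL=your-volume
  MNT=/your/mountpoint
  gluster volume profile $VOL start
  setfattr -n trusted.io-stats-dump -v io-stats-pre.txt $MNT    # dump counters before the workload
  run-your-workload                                             # placeholder for the application under test
  setfattr -n trusted.io-stats-dump -v io-stats-post.txt $MNT   # dump counters after the workload
  # compare /var/run/gluster/io-stats-pre.txt with io-stats-post.txt to see what the workload added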

+

TBS: what the different FOPs are and what they mean.

+

server-side profiling

+

To run it:

+
  • gluster volume profile your-volume start
  • repeat this command periodically: gluster volume profile your-volume info
  • gluster volume profile your-volume stop

A script like gvp.sh can help you automate this procedure.
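If you do not have such a script handy, a rough equivalent is a loop that appends each periodic sample to a log for later post-processing (the volume name and interval are placeholders):

  VOL=your-volume
  gluster volume profile $VOL start
  while true ; do
      date >> /tmp/gvp-$VOL.log
      gluster volume profile $VOL info >> /tmp/gvp-$VOL.log   # cumulative plus per-interval stats
      sleep 60
  done
  # interrupt the loop when the workload finishes, then: gluster volume profile $VOL stop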

+

Scripts to post-process this data are in development now, let us know what you need and what would be a useful format for presenting the data.

+

Testing tools

+

In this section, we suggest some basic workload tests that can be used to measure Gluster performance in an application-independent way for a wide variety of POSIX-like operating systems and runtime environments. We then provide some terminology and conceptual framework for interpreting these results.

+

The tools that we suggest here are designed to run in a distributed filesystem. This is still a relatively rare attribute for filesystem benchmarks, even now! There is a much larger set of benchmarks available that can be run from a single system. While single-system results are important, they are far from a definitive measure of the performance capabilities of a distributed filesystem.

+
  • fio - for large file I/O tests.
  • smallfile - for pure-workload small-file tests
  • iozone - for pure-workload large-file tests
  • parallel-libgfapi - for pure-workload libgfapi tests

The "netmist" mixed-workload generator of SPECsfs2014 may be suitable in some cases, but is not technically an open-source tool. This tool was written by Don Capps, who was an author of iozone.

+

fio

+

fio is extremely powerful and is easily installed from traditional distros, unlike iozone, and has increasingly powerful distributed test capabilities described in its --client parameter upstream as of May 2015. To use this mode, start by launching an fio "server" instance on each workload generator host using:

+
    fio --server --daemonize=/var/run/fio-svr.pid
+
+

And make sure your firewall allows port 8765 through for it. You can now run tests on sets of hosts using syntax like:

+
    fio --client=workload-generator.list --output-format=json my-workload.fiojob
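For reference, a sketch of what the two inputs might contain; the host names and job parameters are illustrative only, not recommended values:

  # workload-generator.list -- one fio server host per line
  client1
  client2

  # my-workload.fiojob
  [global]
  directory=/mnt/glusterfs/fio
  ioengine=libaio
  direct=1
  bs=64k
  size=1g
  runtime=120
  time_based=1

  [seq-write]
  rw=write
  numjobs=4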
+
+

You can also use it for distributed testing, however, by launching fio instances on separate hosts, taking care to start all fio instances as close to the same time as possible, limiting per-thread throughput, and specifying the run duration rather than the amount of data, so that all fio instances end at around the same time. You can then aggregate the fio results from different hosts to get a meaningful aggregate result.

+

fio also has different I/O engines, in particular Huamin Chen authored the libgfapi engine for fio so that you can use fio to test Gluster performance without using FUSE.
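A sketch of a libgfapi job file (the volume and server names are placeholders, and your fio build must have the gfapi engine compiled in):

  [global]
  ioengine=gfapi
  volume=testvol
  brick=server1        # any server that hosts the volume
  bs=64k
  size=1g

  [gfapi-seq-write]
  rw=write
  numjobs=4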

+

Limitations of fio in distributed mode:

+
  • stonewalling - fio calculates throughput based on when the last thread finishes a test run. In contrast, iozone calculates throughput by default based on when the FIRST thread finishes the workload. This can lead to (deceptively?) higher throughput results for iozone, since there are inevitably some "straggler" threads limping to the finish line later than others. It is possible in some cases to overcome this limitation by specifying a time limit for the test. This works well for random I/O tests, where typically you do not want to read/write the entire file/device anyway.
  • inaccuracy when response times > 1 sec - at least in some cases fio has reported excessively high IOPS when fio threads encounter response times much greater than 1 second, this can happen for distributed storage when there is unfairness in the implementation.
  • io engines are not integrated.

smallfile Distributed I/O Benchmark

+

Smallfile is a python-based small-file distributed POSIX workload generator which can be used to quickly measure performance for a variety of metadata-intensive workloads across an entire cluster. It has no dependencies on any specific filesystem or implementation AFAIK. It runs on Linux, Windows and should work on most Unixes too. It is intended to complement use of iozone benchmark for measuring performance of large-file workloads, and borrows certain concepts from iozone and Ric Wheeler's fs_mark. It was developed by Ben England starting in March 2009, and is now open-source (Apache License v2).

+

Here is a typical simple sequence of tests where files laid down in an initial create test are then used in subsequent tests. There are many more smallfile operation types than these 5 (see doc), but these are the most commonly used ones.

+
    SMF="./smallfile_cli.py --top /mnt/glusterfs/smf --host-set h1,h2,h3,h4 --threads 8 --file-size 4 --files 10000 --response-times Y "
    $SMF --operation create
    for s in $SERVERS ; do ssh $s 'echo 3 > /proc/sys/vm/drop_caches' ; done
    $SMF --operation read
    $SMF --operation append
    $SMF --operation rename
    $SMF --operation delete

iozone

+

This tool has limitations but does distributed testing well using the -+m option (below).

+

The "-a" option for automated testing of all use cases is discouraged, +because:

+
  • this does not allow you to drop the read cache in the server before a test.
  • most of the data points being measured will be irrelevant to the problem you are solving.

Single-thread testing is an important use case, but to fully utilize the available hardware you typically need to do multi-thread and even multi-host testing.

+

Consider using "-c -e" options to measure the time it takes for data to +reach persistent storage. "-C" option lets you see how much each thread +participated in the test. "-+n" allows you to save time by skipping +re-read and re-write tests. "-w" option tells iozone not to delete any +files that it accessed, so that subsequent tests can use them. Specify +these options with each test:

+
  • -i -- test type, 0=write, 1=read, 2=random read/write
  • -r -- data transfer size -- allows you to simulate I/O size used by application
  • -s -- per-thread file size -- choose this to be large enough for the system to reach steady state (typically multiple GB needed)
  • -t -- number of threads -- how many subprocesses will be concurrently issuing I/O requests
  • -F -- list of files -- what files to write/read. If you do not specify then the filenames iozone.DUMMY.* will be used in the default directory.

Example of an 8-thread sequential write test with 64-KB transfer size and file size of 1 GB to shared Gluster mountpoint directory /mnt/glusterfs, including time to fsync() and close() the files in the throughput calculation:

+
    iozone -w -c -e -i 0 -+n -C -r 64k -s 1g -t 8 -F /mnt/glusterfs/f{0,1,2,3,4,5,6,7,8}.ioz
+
+

WARNING: random I/O testing in iozone is heavily restricted by the iozone constraint that it must randomly read then randomly write the entire file! This is not what we want - instead it should randomly read/write for some fraction of file size or time duration, allowing us to spread out more on the disk while not waiting too long for the test to finish. This is why fio (below) is the preferred test tool for random I/O workloads.

+

Distributed testing is a strength of the iozone utility, but this requires use of the "-+m" option in place of the "-F" option. The configuration file passed with the "-+m" option contains a series of records that look like this:

+
    hostname   directory   iozone-pathname
+
+

Where hostname is a host name or IP address of a test driver machine that iozone can use, directory is the pathname of a directory to use within that host, and iozone-pathname is the full pathname of the iozone executable to use on that host. Be sure that every target host can resolve the hostname of the host where the iozone command was run. All target hosts must permit password-less ssh access from the host running the command.

+

For example: (Here, my-ip-address refers to the machine from where the iozone is being run)

+
    export RSH=ssh
+    iozone -+m ioz.cfg -+h my-ip-address -w -c -e -i 0 -+n -C -r 64k -s 1g -t 4
+
+

And the file ioz.cfg contains these records (where /mnt/glusterfs is the Gluster mountpoint on each test machine and test-client-ip is the IP address of a client). Also note that each record in the file is a thread in iozone terminology. Since we have defined the number of threads to be 4 in the above example, we have four records (threads) for a single client.

+
    test-client-ip  /mnt/glusterfs  /usr/local/bin/iozone
+    test-client-ip  /mnt/glusterfs  /usr/local/bin/iozone
+    test-client-ip  /mnt/glusterfs  /usr/local/bin/iozone
+    test-client-ip  /mnt/glusterfs  /usr/local/bin/iozone
+
+

Restriction: Since iozone uses non-privileged ports it may be necessary to temporarily shut down or alter iptables on some/all of the hosts. Secondary machines must support password-less access from the primary machine via ssh.

+

Note that the -+h option is undocumented but it tells the secondary host what IP address to use so that the secondary does not have to be able to resolve the hostname of the test driver. my-ip-address is the IP address that the secondary should connect to in order to report results back to the host. This need not be the same as the host's hostname.

+

Typically you run the sequential write test first to lay down the file, drop cache on the servers (and clients if necessary), do the sequential read test, drop cache, and do the random I/O test if desired. Using the above example:

+
    export RSH=ssh
    IOZ="iozone -+m ioz.cfg -+h my-ip-address -w -C -c -e -r 64k -+n "
    hosts="`awk '{ print $1 }' ioz.cfg`"
    $IOZ -i 0 -s 1g -t 4
    for n in $hosts $servers ; do \
       ssh $n 'sync; echo 1 > /proc/sys/vm/drop_caches' ; done
    $IOZ -i 1 -s 1g -t 4
    for n in $hosts $servers ; do \
       ssh $n 'sync; echo 1 > /proc/sys/vm/drop_caches' ; done
    $IOZ -i 2 -s 1g -t 4

If you use a client with buffered I/O (the default), drop cache on the client machines first, then the server machines also as shown above.

+

parallel-libgfapi

+

This test exercises Gluster performance using the libgfapi API, bypassing FUSE - no mountpoints are used. Available here.

+

To use it, you edit the script parameters in parallel_gfapi_test.sh +script - all of them are above the comment "NO EDITABLE PARAMETERS BELOW +THIS LINE". These include such things as the Gluster volume name, a host +serving that volume, number of files, etc. You then make sure that the +gfapi_perf_test executable is distributed to the client machines at +the specified directory, and then run the script. The script starts all +libgfapi workload generator processes in parallel in such a way that +they all start the test at the same time. It waits until they all +complete, and then it collects and aggregates the results for you.

+

Note that libgfapi processes consume one socket per brick, so in Gluster volumes with high brick counts, there can be constraints on the number of libgfapi processes that can run concurrently. Specifically, each host can only support up to about 30000 concurrent TCP ports. You may need to adjust the "ulimit -n" parameter (see the /etc/security/limits.conf "nofile" parameter for persistent tuning).
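For persistent tuning, the relevant snippet would look something like this (the limit value is only an example):

  # /etc/security/limits.conf
  *    soft    nofile    65536
  *    hard    nofile    65536

  # per-session equivalent
  ulimit -n 65536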

+

Object Store tools

+

COSBench was developed by Intel employees and is very useful for both Swift and S3 workload generation.

+

ssbench is part of the OpenStack Swift toolset and is a command-line tool with a workload definition file format.

+

Workload

+

An application can be as simple as writing some files, or it can be as +complex as running a cloud on top of Gluster. But all applications have +performance requirements, whether the users are aware of them or not, +and if these requirements aren't met, the system as a whole is not +functional from the user's perspective. The activities that the +application spends most of its time doing with Gluster are called the +"workload" below. For the Gluster filesystem, the "workload" consists of +the filesystem requests being delivered to Gluster by the application. +There are two ways to look at workload:

+
  • top-down - what is the application trying to get the filesystem to do?
  • bottom-up - what requests is the application actually generating to the filesystem?

data vs metadata

+

In this page we frequently refer to "large-file" or "small-file" +workloads. But what do we mean by the terms "large-file" or +"small-file"? "large-file" is a deliberately vague but descriptive term +that refers to workloads where most of the application time is spent +reading/writing the file. This is in contrast to a "small-file" +workload, where most of the application's time is spent opening/closing +the file or accessing metadata about the file. Metadata means "data +about data", so it is information that describes the state of the file, +rather than the contents of the file. For example, a filename is a type +of metadata, as are directories and extended attributes.

+

Top-down workload analysis

+

Often this is what users will be able to help you with -- for example, a workload might consist of ingesting a billion .mp3 files. Typical questions that need to be answered (approximately) are:

+
  • what is the file size distribution? Averages are often not enough - file size distributions can be bi-modal (i.e. consist mostly of the very large and very small file sizes). TBS: provide pointers to scripts that can collect this.
  • what fraction of file accesses are reads vs writes?
  • how cache-friendly is the workload? Do the same files get read repeatedly by different Gluster clients, or by different processes/threads on these clients?
  • for large-file workloads, what fraction of accesses are sequential/random? Sequential file access means that the application thread reads/writes the file from start to finish in byte offset order, and random file access is the exact opposite -- the thread may read/write from any offset at any time. Virtual machine disk images are typically accessed randomly, since the VM's filesystem is embedded in a Gluster file.

Why do these questions matter? For example, if you have a large-file sequential read workload, network configuration + Gluster and Linux readahead is important. If you have a small-file workload, storage configuration is important, and so on. You will not know what tuning is appropriate for Gluster unless you have a basic understanding of the workload.

+

Bottom-up analysis

+

Even a complex application may have a very simple workload from the point of view of the filesystem servicing its requests. If you don't know what your application spends its time doing, you can start by running the "gluster volume profile" and "gluster volume top" commands. These extremely useful tools will help you understand both the workload and the bottlenecks which are limiting performance of that workload.

+

TBS: links to documentation for these tools and scripts that reduce the data to usable form.

+

Configuration

+

There are 4 basic hardware dimensions to a Gluster server, listed here in order of importance:

+
  • network - possibly the most important hardware component of a Gluster site
  • access protocol - what kind of client is used to get to the files/objects?
  • storage - this is absolutely critical to get right up front
  • cpu - on client, look for hot threads (see below)
  • memory - can impact performance of read-intensive, cacheable workloads

network testing

+

Network configuration has a huge impact on performance of distributed storage, but is often not given the attention it deserves during the planning and installation phases of the cluster lifecycle. Fortunately, network configuration can be enhanced significantly, often without additional hardware.

+

To measure network performance, consider use of a netperf-based script.

+

The purpose of these two tools is to characterize the capacity of your entire network infrastructure to support the desired level of traffic induced by distributed storage, using multiple network connections in parallel. The latter script is probably the most realistic network workload for distributed storage.

+

The two most common hardware problems impacting distributed storage are, +not surprisingly, disk drive failures and network failures. Some of +these failures do not cause hard errors, but instead cause performance +degradation. For example, with a bonded network interface containing two +physical network interfaces, if one of the physical interfaces fails +(either port on NIC/switch, or cable), then the bonded interface will +stay up, but will have less performance (how much less depends on the +bonding mode). Another error would be failure of an 10-GbE Ethernet +interface to autonegotiate speed to 10-Gbps -- sometimes network +interfaces auto-negotiate to 1-Gbps instead. If the TCP connection is +experiencing a high rate of packet loss or is not tuned correctly, it +may not reach the full network speed supported by the hardware.

+

So why run parallel netperf sessions instead of just one? There are a +variety of network performance problems relating to network topology +(the way in which hosts are interconnected), particularly network switch +and router topology, that only manifest when several pairs of hosts are +attempting to transmit traffic across the same shared resource, which +could be a trunk connecting top-of-rack switches or a blade-based switch +with insufficient bandwidth to switch backplane, for example. Individual +netperf/iperf sessions will not find these problems, but this script +will.

+

This test can be used to simulate flow of data through a distributed filesystem, for example. If you want to simulate 4 Gluster clients, call them c1 through c4, writing large files to a set of 2 servers, call them s1 and s2, you can specify these (sender, receiver) pairs:

+
    (c1,s1), (c2, s2), (c3, s1), (c4, s2)
+
+

If on the other hand you want to simulate reads, you can use these +(sender, receiver) pairs:

+
    (s1, c1), (s2, c2), (s1, c3), (s2, c4)
+
+

To simulate a mixed read-write workload, use both sets of pairs:

+
    (c1,s1), (c2, s2), (c3, s1), (c4, s2), (s1, c1), (s2, c2), (s1, c3), (s2, c4)
+
+

More complicated flows can model behavior of non-native protocols, where a cluster node acts as a proxy server- it is a server (for non-native protocol) and a client (for native protocol). For example, such protocols often induce full-duplex traffic which can stress the network differently than unidirectional in/out traffic. For example, try adding this set of flows to preceding flow:

+
    (s1, s2), (s2, s3), (s3, s4), (s4, s1)
+
+

The comments at the top of the script describe the input syntax, but +here are some suggestions on how to best utilize it. You typically run +this script from a head node or test driver that has password-less ssh +access to the set of machines being tested. The hosts running the test +do not need ssh access to each other -- they only have to allow +password-less ssh access from the head node. The script does not rely on +root privileges, so you can run it from a non-root account. Just create +a public key on the head node in the right account (usually in +\$HOME/.ssh/id_rsa.pub ) and then append this public key to +\$HOME/.ssh/authorized_keys on each host participating in the test.

+

We input senders and receivers using separate text files, 1 host per line. For pair (sender[j], receiver[j]), you get sender[j] from line j in the sender file, and receiver[j] from line j in the receiver file. You have to use the IP address/name that corresponds to the interface you want to test, and you have to be able to ssh to each host from the head node using this interface.
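Continuing the 4-client / 2-server write example above, the two input files would look like this (a sketch; the exact file names and invocation depend on the netperf wrapper script you use):

  # senders.txt        (one host per line)
  c1
  c2
  c3
  c4

  # receivers.txt      (line j pairs with line j of senders.txt)
  s1
  s2
  s1
  s2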

+

Results

+

There are 3 basic forms of performance results, not in order of importance:

+
  • throughput -- how much work is done in a unit of time? Best metrics typically are workload-dependent:
    • for large-file random: IOPS
    • for large-file sequential: MB/s
    • for small-file: files/sec
  • response time -- IMPORTANT, how long does it take for a filesystem request to complete?
  • utilization -- how busy is the hardware while the workload is running?
  • scalability -- can we linearly scale throughput without sacrificing response time as we add servers to a Gluster volume?

Typically throughput results get the most attention, but in a distributed-storage environment, the hardest goal to achieve may well be CONSISTENTLY LOW RESPONSE TIME, not throughput.

+

While there are non-interactive workloads where response time does not +matter as much, you should pay attention to response time in any +situation where a user has to directly interact with the filesystem. +Tuning the filesystem to achieve the absolute highest throughput can +result in a filesystem that is unusable because of high response time. +Unless you are in a benchmarking situation, you want to achieve a +balance of good throughput and response time. Typically an interactive +user wants to see a response time under 5 seconds always, with most +response times much lower than this. To keep response times under +control (including system management!), you do not want any hardware +component to run at maximum utilization, typically 60-80% utilization is +a good peak utilization target. On the other hand, to avoid wasting +hardware, you want all of the hardware to be utilized to some extent.


Performance tuning

+

Enable Metadata cache

+

Metadata caching improves performance in almost all the workloads, except for use cases with most of the workload accessing a file simultaneously from multiple clients.

+
  1. Execute the following command to enable metadata caching and cache invalidation:

         gluster volume set <volname> group metadata-cache

     This group command enables caching of stat and xattr information of a file or directory. The caching is refreshed every 10 min, and cache-invalidation is enabled to ensure cache consistency.

  2. To increase the number of files that can be cached, execute the following command:

         gluster volume set <volname> network.inode-lru-limit <n>

     n is set to 50000 by default. It can be increased if the number of active files in the volume is very high. Increasing this number increases the memory footprint of the brick processes.

  3. Execute the following command to enable samba specific metadata caching:

         gluster volume set <volname> cache-samba-metadata on

  4. By default, some xattrs are cached by gluster, like capability xattrs, ima xattrs, ACLs, etc. If there are any other xattrs that are used by the application using the Gluster storage, execute the following command to add these xattrs to the metadata cache list (a consolidated example follows this list):

         gluster volume set <volname> xattr-cache-list "comma separated xattr list"

     Eg:

         gluster volume set <volname> xattr-cache-list "user.org.netatalk.*,user.swift.metadata"
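Putting the four steps together for a hypothetical volume named myvol (the inode-lru-limit value is only an example; the rest are the commands from the list above):

  gluster volume set myvol group metadata-cache
  gluster volume set myvol network.inode-lru-limit 200000
  gluster volume set myvol cache-samba-metadata on        # only if the volume is accessed via Samba
  gluster volume set myvol xattr-cache-list "user.org.netatalk.*,user.swift.metadata"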

Directory operations

+

Along with enabling the metadata caching, the following options can be set to increase performance of directory operations:

+

Directory listing Performance:

+
  • Enable parallel-readdir

        gluster volume set <VOLNAME> performance.readdir-ahead on
        gluster volume set <VOLNAME> performance.parallel-readdir on

File/Directory Create Performance

+
  • Enable nl-cache

        gluster volume set <volname> group nl-cache
        gluster volume set <volname> nl-cache-positive-entry on

The above command also enables cache invalidation and increases the timeout to 10 minutes.

+

Small file Read operations

+

For use cases with dominant small file reads, enable the following options

+
gluster volume set <volname> performance.cache-invalidation on
gluster volume set <volname> features.cache-invalidation on
gluster volume set <volname> performance.qr-cache-timeout 600 # 10 min recommended setting
gluster volume set <volname> cache-invalidation-timeout 600 # 10 min recommended setting

These commands enable caching of the content of small files in the client cache. Enabling cache invalidation ensures cache consistency.

+

The total cache size can be set using

+
gluster volume set <volname> cache-size <size>
+
+

By default, the files with size <=64KB are cached. To change this value:

+
gluster volume set <volname> performance.cache-max-file-size <size>
+
+

Note that the size arguments use SI unit suffixes, e.g. 64KB or 2MB.


Puppet-Gluster

+ +

A GlusterFS Puppet module by James

+

Available from:

+

https://github.com/purpleidea/puppet-gluster/

+

Table of Contents

+
    +
  1. Overview
  2. Module description - What the module does
  3. Setup - Getting started with Puppet-Gluster
  4. Usage/FAQ - Notes on management and frequently asked questions
  5. Reference - Class and type reference
  6. Examples - Example configurations
  7. Limitations - Puppet versions, OS compatibility, etc...
  8. Development - Background on module development
  9. Author - Author and contact information
+

Overview

+

The Puppet-Gluster module installs, configures, and manages a GlusterFS cluster.

+

Module Description

+

This Puppet-Gluster module handles installation, configuration, and management +of GlusterFS across all of the hosts in the cluster.

+

Setup

+

What can Puppet-Gluster manage?

+

Puppet-Gluster is designed to be able to manage as much or as little of your +GlusterFS cluster as you wish. All features are optional. If there is a feature +that doesn't appear to be optional, and you believe it should be, please let me +know. Having said that, it makes good sense to me to have Puppet-Gluster manage +as much of your GlusterFS infrastructure as it can. At the moment, it cannot +rack new servers, but I am accepting funding to explore this feature ;) At the +moment it can manage:

+
    +
  • GlusterFS packages (rpm)
  • GlusterFS configuration files (/var/lib/glusterd/)
  • GlusterFS host peering (gluster peer probe)
  • GlusterFS storage partitioning (fdisk)
  • GlusterFS storage formatting (mkfs)
  • GlusterFS brick creation (mkdir)
  • GlusterFS services (glusterd)
  • GlusterFS firewalling (whitelisting)
  • GlusterFS volume creation (gluster volume create)
  • GlusterFS volume state (started/stopped)
  • GlusterFS volume properties (gluster volume set)
  • And much more...
+

Simple setup

+

include '::gluster::simple' is enough to get you up and running. When using the +gluster::simple class, or with any other Puppet-Gluster configuration, +identical definitions must be used on all hosts in the cluster. The simplest +way to accomplish this is with a single shared puppet host definition like:

+
node /^annex\d+$/ {        # annex{1,2,..N}
+        class { '::gluster::simple':
+        }
+}
+
+

If you wish to pass in different parameters, you can specify them in the class +before you provision your hosts:

+
class { '::gluster::simple':
+    replica => 2,
+    volume => ['volume1', 'volume2', 'volumeN'],
+}
+
+

Elastic setup

+

The gluster::elastic class is not yet available. Stay tuned!

+

Advanced setup

+

Some system administrators may wish to manually itemize each of the required +components for the Puppet-Gluster deployment. This happens automatically with +the higher level modules, but may still be a desirable feature, particularly +for non-elastic storage pools where the configuration isn't expected to change +very often (if ever).

+

To put together your cluster piece by piece, you must manually include and +define each class and type that you wish to use. If there are certain aspects +that you wish to manage yourself, you can omit them from your configuration. +See the reference section below for the specifics. Here is one +possible example:

+
class { '::gluster::server':
+    shorewall => true,
+}
+
+gluster::host { 'annex1.example.com':
+    # use uuidgen to make these
+    uuid => '1f660ca2-2c78-4aa0-8f4d-21608218c69c',
+}
+
+# note that this is using a folder on your existing file system...
+# this can be useful for prototyping gluster using virtual machines
+# if this isn't a separate partition, remember that your root fs will
+# run out of space when your gluster volume does!
+gluster::brick { 'annex1.example.com:/data/gluster-storage1':
+    areyousure => true,
+}
+
+gluster::host { 'annex2.example.com':
+    # NOTE: specifying a host uuid is now optional!
+    # if you don't choose one, one will be assigned
+    #uuid => '2fbe6e2f-f6bc-4c2d-a301-62fa90c459f8',
+}
+
+gluster::brick { 'annex2.example.com:/data/gluster-storage2':
+    areyousure => true,
+}
+
+$brick_list = [
+    'annex1.example.com:/data/gluster-storage1',
+    'annex2.example.com:/data/gluster-storage2',
+]
+
+gluster::volume { 'examplevol':
+    replica => 2,
+    bricks => $brick_list,
+    start => undef, # i'll start this myself
+}
+
+# namevar must be: <VOLNAME>#<KEY>
+gluster::volume::property { 'examplevol#auth.reject':
+    value => ['192.0.2.13', '198.51.100.42', '203.0.113.69'],
+}
+
+

Usage and frequently asked questions

+

All management should be done by manipulating the arguments on the appropriate Puppet-Gluster classes and types. Since certain manipulations are either not yet possible with Puppet-Gluster, or are not supported by GlusterFS, attempting to manipulate the Puppet configuration in an unsupported way will result in undefined behaviour, and possibly even data loss, although this is unlikely.

+

How do I change the replica count?

+

You must set this before volume creation. This is a limitation of GlusterFS. +There are certain situations where you can change the replica count by adding +a multiple of the existing brick count to get this desired effect. These cases +are not yet supported by Puppet-Gluster. If you want to use Puppet-Gluster +before and / or after this transition, you can do so, but you'll have to do the +changes manually.

+

Do I need to use a virtual IP?

+

Using a virtual IP (VIP) is strongly recommended as a distributed lock manager +(DLM) and also to provide a highly-available (HA) IP address for your clients +to connect to. For a more detailed explanation of the reasoning please see:

+

https://ttboj.wordpress.com/2012/08/23/how-to-avoid-cluster-race-conditions-or-how-to-implement-a-distributed-lock-manager-in-puppet/

+

Remember that even if you're using a hosted solution (such as AWS) that doesn't +provide an additional IP address, or you want to avoid using an additional IP, +and you're okay not having full HA client mounting, you can use an unused +private RFC1918 IP address as the DLM VIP. Remember that a layer 3 IP can +co-exist on the same layer 2 network with the layer 3 network that is used by +your cluster.

+

Is it possible to have Puppet-Gluster complete in a single run?

+

No. This is a limitation of Puppet, and is related to how GlusterFS operates. +For example, it is not reliably possible to predict which ports a particular +GlusterFS volume will run on until after the volume is started. As a result, +this module will initially whitelist connections from GlusterFS host IP +addresses, and then further restrict this to only allow individual ports once +this information is known. This is possible in conjunction with the +puppet-shorewall module. +You should notice that each run should complete without error. If you do see an +error, it means that either something is wrong with your system and / or +configuration, or because there is a bug in Puppet-Gluster.

+

Can you integrate this with vagrant?

+

Not until vagrant properly supports libvirt/KVM. I have no desire to use +VirtualBox for fun.

+

Awesome work, but it's missing support for a feature and/or platform!

+

Since this is an Open Source / Free Software project that I also give away for +free (as in beer, free as in gratis, free as in libre), I'm unable to provide +unlimited support. Please consider donating funds, hardware, virtual machines, +and other resources. For specific needs, you could perhaps sponsor a feature!

+

You didn't answer my question, or I have a question!

+

Contact me through my technical blog +and I'll do my best to help. If you have a good question, please remind me to +add my answer to this documentation!

+

Reference

+

Please note that there are a number of undocumented options. For more +information on these options, please view the source at: +https://github.com/purpleidea/puppet-gluster/. +If you feel that a well used option needs documenting here, please contact me.

+

Overview of classes and types

  • gluster::simple
  • gluster::elastic
  • gluster::server
  • gluster::host
  • gluster::brick
  • gluster::volume
  • gluster::volume::property

gluster::simple

+

This is gluster::simple. It should probably take care of 80% of all use cases. +It is particularly useful for deploying quick test clusters. It uses a +finite-state machine (FSM) to decide when the cluster has settled and volume +creation can begin. For more information on the FSM in Puppet-Gluster see: +https://ttboj.wordpress.com/2013/09/28/finite-state-machines-in-puppet/

+

replica

+

The replica count. Can't be changed automatically after initial deployment.

+

volume

+

The volume name or list of volume names to create.

+

path

+

The valid brick path for each host. Defaults to local file system. If you need +a different path per host, then Gluster::Simple will not meet your needs.

+

vip

+

The virtual IP address to be used for the cluster distributed lock manager.

+

shorewall

+

Boolean to specify whether puppet-shorewall integration should be used or not.

+

gluster::elastic

+

Under construction.

+

gluster::server

+

Main server class for the cluster. Must be included when building the GlusterFS +cluster manually. Wrapper classes such as gluster::simple +include this automatically.

+

vip

+

The virtual IP address to be used for the cluster distributed lock manager.

+

shorewall

+

Boolean to specify whether puppet-shorewall integration should be used or not.

+

gluster::host

+

Main host type for the cluster. Each host participating in the GlusterFS +cluster must define this type on itself, and on every other host. As a result, +this is not a singleton like the gluster::server class.

+

ip

+

Specify which IP address this host is using. This defaults to the +$::ipaddress variable. Be sure to set this manually if you're declaring this +yourself on each host without using exported resources. If each host thinks the +other hosts should have the same IP address as itself, then Puppet-Gluster and +GlusterFS won't work correctly.

+

uuid

+

Universally unique identifier (UUID) for the host. If empty, Puppet-Gluster +will generate this automatically for the host. You can generate your own +manually with uuidgen, and set them yourself. I found this particularly +useful for testing, because I would pick easy to recognize UUID's like: +aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, +bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb, and so on. If you set a UUID manually, +and Puppet-Gluster has a chance to run, then it will remember your choice, and +store it locally to be used again if you no longer specify the UUID. This is +particularly useful for upgrading an existing un-managed GlusterFS installation +to a Puppet-Gluster managed one, without changing any UUID's.
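For instance, a small sketch of wiring a manually generated UUID into a host definition (the hostname is a placeholder and the UUID shown is the same example value used in the advanced setup above):

    # on any machine:
    $ uuidgen
    1f660ca2-2c78-4aa0-8f4d-21608218c69c

    # in your puppet manifest:
    gluster::host { 'annex1.example.com':
        uuid => '1f660ca2-2c78-4aa0-8f4d-21608218c69c',
    }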

+

gluster::brick

+

Main brick type for the cluster. Each brick is an individual storage segment to +be used on a host. Each host must have at least one brick to participate in the +cluster, but usually a host will have multiple bricks. A brick can be as simple +as a file system folder, or it can be a separate file system. Please read the +official GlusterFS documentation, if you aren't entirely comfortable with the +concept of a brick.

+

For most test clusters, and for experimentation, it is easiest to use a +directory on the root file system. You can even use a /tmp sub folder if you +don't care about the persistence of your data. For more serious clusters, you +might want to create separate file systems for your data. On self-hosted iron, +it is not uncommon to create multiple RAID-6 drive pools, and to then create a +separate file system per virtual drive. Each file system can then be used as a +single brick.

+

So that each volume in GlusterFS has the maximum ability to grow, without +having to partition storage separately, the bricks in Puppet-Gluster are +actually folders (on whatever backing store you wish) which then contain +sub folders-- one for each volume. As a result, all the volumes on a given +GlusterFS cluster can share the total available storage space. If you wish to +limit the storage used by each volume, you can setup quotas. Alternatively, you +can buy more hardware, and elastically grow your GlusterFS volumes, since the +price per GB will be significantly less than any proprietary storage system. +The one downside to this brick sharing, is that if you have chosen the brick +per host count specifically to match your performance requirements, and +each GlusterFS volume on the same cluster has drastically different brick per +host performance requirements, then this won't suit your needs. I doubt that +anyone actually has such requirements, but if you do insist on needing this +compartmentalization, then you can probably use the Puppet-Gluster grouping +feature to accomplish this goal. Please let me know about your use-case, and +be warned that the grouping feature hasn't been extensively tested.

+

To prove to you that I care about automation, this type offers the ability to +automatically partition and format your file systems. This means you can plug +in new iron, boot, provision and configure the entire system automatically. +Regrettably, I don't have a lot of test hardware to routinely use this feature. +If you'd like to donate some, I'd be happy to test this thoroughly. Having said +that, I have used this feature, I consider it to be extremely safe, and it has +never caused me to lose data. If you're uncertain, feel free to look at the +code, or avoid using this feature entirely. If you think there's a way to make +it even safer, then feel free to let me know.

+

dev

+

Block device, such as /dev/sdc or /dev/disk/by-id/scsi-0123456789abcdef. By +default, Puppet-Gluster will assume you're using a folder to store the brick +data, if you don't specify this parameter.

+

fsuuid

+

File system UUID. This ensures we can distinctly identify a file system. You +can set this to be used with automatic file system creation, or you can specify +the file system UUID that you'd like to use.

+

labeltype

+

Only gpt is supported. Other options include msdos, but this has never been used because of its size limitations.

+

fstype

+

This should be xfs or ext4. Using xfs is recommended, but ext4 is also +quite common. This only affects a file system that is getting created by this +module. If you provision a new machine, with a root file system of ext4, and +the brick you create is a root file system path, then this option does nothing.

+

xfs_inode64

+

Set inode64 mount option when using the xfs fstype. Choose true to set.

+

xfs_nobarrier

+

Set nobarrier mount option when using the xfs fstype. Choose true to set.

+

ro

+

Whether the file system should be mounted read only. For emergencies only.

+

force

+

If true, this will overwrite any xfs file system it sees. This is useful for +rebuilding GlusterFS repeatedly and wiping data. There are other safeties in +place to stop this. In general, you probably don't ever want to touch this.

+

areyousure

+

Do you want to allow Puppet-Gluster to do dangerous things? You have to set +this to true to allow Puppet-Gluster to fdisk and mkfs your file system.

+

gluster::volume

+

Main volume type for the cluster. This is where a lot of the magic happens. +Remember that changing some of these parameters after the volume has been +created won't work, and you'll experience undefined behaviour. There could be +FSM based error checking to verify that no changes occur, but it has been left +out so that this code base can eventually support such changes, and so that the +user can manually change a parameter if they know that it is safe to do so.

+

bricks

+

List of bricks to use for this volume. If this is left at the default value of +true, then this list is built automatically. The algorithm that determines +this order does not support all possible situations, and most likely can't +handle certain corner cases. It is possible to examine the FSM to view the +selected brick order before it has a chance to create the volume. The volume +creation script won't run until there is a stable brick list as seen by the FSM +running on the host that has the DLM. If you specify this list of bricks +manually, you must choose the order to match your desired volume layout. If you +aren't sure about how to order the bricks, you should review the GlusterFS +documentation first.

+

transport

+

Only tcp is supported. Possible values can include rdma, but this won't get +any testing if I don't have access to infiniband hardware. Donations welcome.

+

replica

+

Replica count. Usually you'll want to set this to 2. Some users choose 3. +Other values are seldom seen. A value of 1 can be used for simply testing a +distributed setup, when you don't care about your data or high availability. A +value greater than 4 is probably wasteful and unnecessary. It might even +cause performance issues if a synchronous write is waiting on a slow fourth +server.

+

stripe

+

Stripe count. Thoroughly unsupported and untested option. Not recommended for +use by GlusterFS.

+

ping

+

Do we want to include ping checks with fping?

+

settle

+

Do we want to run settle checks?

+

start

+

Requested state for the volume. Valid values include: true (start), false +(stop), or undef (un-managed start/stop state).

+

gluster::volume::property

+

Main volume property type for the cluster. This allows you to manage GlusterFS +volume specific properties. There are a wide range of properties that volumes +support. For the full list of properties, you should consult the GlusterFS +documentation, or run the gluster volume set help command. To set a property +you must use the special name pattern of: volume#key. The value argument is +used to set the associated value. It is smart enough to accept values in the +most logical format for that specific property. Some properties aren't yet +supported, so please report any problems you have with this functionality. +Because this feature is an awesome way to document as code the volume +specific optimizations that you've made, make sure you use this feature even if +you don't use all the others.
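As a short sketch (the volume name examplevol matches the earlier example; nfs.disable is just one of many valid keys), recording a volume option as code could look like:

    # namevar must be: <VOLNAME>#<KEY>
    gluster::volume::property { 'examplevol#nfs.disable':
        value => true,
    }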

+

value

+

The value to be used for this volume property.

+

Examples

+

For example configurations, please consult the examples/ directory in the git +source repository. It is available from:

+

https://github.com/purpleidea/puppet-gluster/tree/master/examples

+

Limitations

+

This module has been tested against open source Puppet 3.2.4 and higher.

+

The module has been tested on:

+
    +
  • CentOS 6.4
  • +
+

It will probably work without incident or without major modification on:

+
    +
  • CentOS 5.x/6.x
  • +
  • RHEL 5.x/6.x
  • +
+

It will most likely work with other Puppet versions and on other platforms, but +testing under other conditions has been light due to lack of resources. It will +most likely not work on Debian/Ubuntu systems without modification. I would +really love to add support for these operating systems, but I do not have any +test resources to do so. Please sponsor this if you'd like to see it happen.

+

Development

+

This is my personal project that I work on in my free time. +Donations of funding, hardware, virtual machines, and other resources are +appreciated. Please contact me if you'd like to sponsor a feature, invite me to +talk/teach or for consulting.

+

You can follow along on my technical blog.

+

Author

+

Copyright (C) 2010-2013+ James Shubin

--- Administrator-Guide/RDMA-Transport/index.html: RDMA Transport - Gluster Docs ---

NOTE: FEATURE DEPRECATED

+

RDMA is no longer supported in Gluster builds. It has been removed from release 8 onwards.

+

Currently we don't have:

+
    +
  1. The expertise to support RDMA
  2. Infrastructure to test/verify the performance of each release
     (the options are being discussed here - https://github.com/gluster/glusterfs/issues/2000)
+

It remains ready to be enabled as a compile-time option if proper support and testing infrastructure become available.

+

Introduction

+

GlusterFS supports using RDMA protocol for communication between glusterfs clients and glusterfs bricks. +GlusterFS clients include FUSE client, libgfapi clients(Samba and NFS-Ganesha included), gNFS server and other glusterfs processes that communicate with bricks like self-heal daemon, quotad, rebalance process etc.

+

NOTE: As of now only FUSE client and gNFS server would support RDMA transport.

+

NOTE:
+NFS client to gNFS Server/NFS Ganesha Server communication would still happen over tcp.
+CIFS Clients/Windows Clients to Samba Server communication would still happen over tcp.

+

Setup

+

Please refer to this external documentation to set up RDMA on your machines:
http://people.redhat.com/dledford/infiniband_get_started.html

+

Creating Trusted Storage Pool

+

All the servers in the Trusted Storage Pool must have RDMA devices if either RDMA or TCP,RDMA volumes are created in the storage pool.
+The peer probe must be performed using IP/hostname assigned to the RDMA device.

+

Ports and Firewall

+

The glusterd process will listen on both tcp and rdma if an rdma device is found. The port used for rdma is 24008. Similarly, brick processes will also listen on two ports for a volume created with transport "tcp,rdma".

+

Make sure you update the firewall to accept packets on these ports.
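For example, on a firewalld-based system the relevant ports could be opened like this (a sketch; the brick port range shown assumes five bricks and must be adapted to your volume layout):

    firewall-cmd --permanent --add-port=24007-24008/tcp   # glusterd management (tcp and rdma)
    firewall-cmd --permanent --add-port=49152-49156/tcp   # brick ports
    firewall-cmd --reload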

+

Gluster Volume Create

+

A volume can support one or more transport types for communication between clients and brick processes. There are three types of supported transport, which are, tcp, rdma, and tcp,rdma.

+

Example: To create a distributed volume with four storage servers over InfiniBand:

+

# gluster volume create test-volume transport rdma server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
+Creation of test-volume has been successful
+Please start the volume to access data.

+

Changing Transport of Volume

+

To change the supported transport types of an existing volume, follow this procedure:
NOTE: This is possible only if the volume was created with an IP/hostname assigned to the RDMA device.

+
    +
  1. Unmount the volume on all the clients using the following command:

     umount mount-point

  2. Stop the volumes using the following command:

     gluster volume stop volname

  3. Change the transport type.
     For example, to enable both tcp and rdma execute the following command:

     gluster volume set volname config.transport tcp,rdma

  4. Mount the volume on all the clients.
     For example, to mount using rdma transport, use the following command:

     mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs
+

NOTE:
config.transport option does not have an entry in the help output of the gluster CLI.

+
gluster vol set help | grep config.transport
+
+

However, the key is a valid one.

+

Mounting a Volume using RDMA

+

You can use the mount option "transport" to specify the transport type that FUSE client must use to communicate with bricks. If the volume was created with only one transport type, then that becomes the default when no value is specified. In case of tcp,rdma volume, tcp is the default.

+

For example, to mount using rdma transport, use the following command:

+
mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs
+
+

Transport used by auxiliary processes

+

All the auxiliary processes like the self-heal daemon, rebalance process, etc. use the default transport. In case you have a tcp,rdma volume it will use tcp.
In case of an rdma volume, rdma will be used.
Configuration options to select the transport used by these processes when the volume is tcp,rdma are not yet available and will be added in later releases.

--- Administrator-Guide/SSL/index.html: SSL - Gluster Docs ---

Setting up GlusterFS with SSL/TLS

+

GlusterFS allows its communication to be secured using the Transport Layer +Security standard (which supersedes Secure Sockets Layer), using the +OpenSSL library. Setting this up requires a basic working knowledge of +some SSL/TLS concepts, which can only be briefly summarized here.

+
    +
  • +

    "Authentication" is the process of one entity (e.g. a machine, process, or + person) proving its identity to a second entity.

    +
  • +
  • +

    "Authorization" is the process of checking whether an entity has permission + to perform an action.

    +
  • +
  • +

    TLS provides authentication and encryption. It does not provide + authorization, though GlusterFS can use TLS-authenticated identities to + authorize client connections to bricks/volumes.

    +
  • +
  • +

    An entity X which must authenticate to a second entity Y does so by sharing + with Y a certificate, which contains information sufficient to prove X's + identity. X's proof of identity also requires possession of a private key + which matches its certificate, but this key is never seen by Y or anyone + else. Because the certificate is already public, anyone who has the key can + claim that identity.

    +
  • +
  • +

    Each certificate contains the identity of its principal (owner) along with + the identity of a certifying authority or CA who can verify the integrity + of the certificate's contents. The principal and CA can be the same (a + "self-signed certificate"). If they are different, the CA must sign the + certificate by appending information derived from both the certificate + contents and the CA's own private key.

    +
  • +
  • +

    Certificate-signing relationships can extend through multiple levels. For + example, a company X could sign another company Y's certificate, which could + then be used to sign a third certificate Z for a specific user or purpose. + Anyone who trusts X (and is willing to extend that trust through a + certificate depth of two or more) would therefore be able to authenticate + Y and Z as well.

    +
  • +
  • +

Any entity willing to accept other entities' authentication attempts must have some sort of database seeded with the certificates that it already accepts.

    +
  • +
+

In GlusterFS's case, a client or server X uses the following files to contain +TLS-related information:

+
    +
  • +

    /etc/ssl/glusterfs.pem X's own certificate

    +
  • +
  • +

    /etc/ssl/glusterfs.key X's private key

    +
  • +
  • +

    /etc/ssl/glusterfs.ca concatenation of others' certificates

    +
  • +
+
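As a quick sketch of how these files might be produced for a test setup with self-signed certificates (the common name Zaphod and the 2048-bit key size are arbitrary choices):

    openssl genrsa -out /etc/ssl/glusterfs.key 2048
    openssl req -new -x509 -key /etc/ssl/glusterfs.key -subj "/CN=Zaphod" -days 365 -out /etc/ssl/glusterfs.pem

    # collect every machine's glusterfs.pem into the CA file on every machine, for example:
    cat server1.pem server2.pem client1.pem > /etc/ssl/glusterfs.ca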

GlusterFS always performs mutual authentication, though clients do not +currently do anything with the authenticated server identity. Thus, if client X +wants to communicate with server Y, then X's certificate (or that of a signer) +must be in Y's CA file, and vice versa.

+

For all uses of TLS in GlusterFS, if one side of a connection is configured to +use TLS then the other side must use it as well. There is no automatic fallback +to non-TLS communication, or allowance for concurrent TLS and non-TLS access to +the same resource, because either would be insecure. Instead, any such "mixed +mode" connections will be rejected by the TLS-using side, sacrificing +availability to maintain security.

+

NOTE: The TLS certificate verification will fail if the machines' date and time are not in sync with each other. Certificate verification depends on the time of the client as well as the server, and if they are not in sync the certificate is deemed invalid. To get the date and time in sync, tools such as ntpdate can be used.

+

Using Certmonger and FreeIPA to generate and manage certs

+

Certmonger can be used to generate keys, request certs from a CA and then +automatically keep the Gluster certificate and the CA bundle updated as +required, simplifying deployment. Either a commercial CA or a local CA can +be used. E.g., FreeIPA (with dogtag CA) is an open-source CA with +user-friendly tooling.

+

If using FreeIPA, first add the host. This is required for FreeIPA to issue +certificates. This can be done via the web UI, or the CLI with:

+
ipa host-add <hostname>
+
+

If the host has been added the following should show the host:

+
ipa host-show <hostname>
+
+

And it should show a kerberos principal for the host in the form of:

+
host/<hostname>
+
+

Now use certmonger on the gluster server or client to generate the key (if +required), and submit a CSR to the CA. Certmonger will monitor the request, +and create and update the files as required. For FreeIPA we need to specify +the Kerberos principal from above to -K. E.g.:

+
 getcert request -r  \
+    -K host/$(hostname)  \
+    -f /etc/ssl/gluster.pem \
+    -k /etc/ssl/gluster.key \
+    -D $(hostname)  \
+    -F /etc/ssl/gluster.ca
+
+

Certmonger should print out an ID for the request, e.g.:

+
New signing request "20210801190305" added.
+
+

You can check the status of the request with this ID:

+
getcert list -i 20210801190305
+
+

If the CA approves the CSR and issues the cert, then the previous command +should print a status field with:

+
status: MONITORING
+
+

As this point, the key, the cert and the CA bundle should all be in /etc/ssl +ready for Gluster to use. Certmonger will renew the certificates as +required for you.

+

You do not need to manually concatenate certs to a trusted cert bundle and +distribute them to all servers.

+

You may need to set the certificate depth to allow the CA signed certs to be +used, if there are intermediate CAs in the signing path. E.g., on every server +and client:

+
echo "option transport.socket.ssl-cert-depth 3" >  /var/lib/glusterd/secure-access
+
+

This should not be necessary where a local CA (e.g., FreeIPA) has directly signed the cert.

+

Enabling TLS on the I/O Path

+

To enable authentication and encryption between clients and brick servers, two +options must be set:

+
gluster volume set MYVOLUME client.ssl on
+gluster volume set MYVOLUME server.ssl on
+
+
+

Note that the above options affect only the GlusterFS native protocol. +For foreign protocols such as NFS, SMB, or Swift the encryption will not be +affected between:

+
    +
  1. NFS client and Glusterfs NFS Ganesha Server
  2. +
  3. SMB client and Glusterfs SMB server
  4. +
+

While it affects the encryption between the following:

+
    +
  1. NFS Ganesha server and Glusterfs bricks
  2. +
  3. Glusterfs SMB server and Glusterfs bricks
  4. +
+
+

Using TLS Identities for Authorization

+

Once TLS has been enabled on the I/O path, TLS identities can be used instead of +IP addresses or plain usernames to control access to specific volumes. For +example:

+
gluster volume set MYVOLUME auth.ssl-allow Zaphod
+
+

Here, we're allowing the TLS-authenticated identity "Zaphod" to access MYVOLUME. +This is intentionally identical to the existing "auth.allow" option, except that +the name is taken from a TLS certificate instead of a command-line string. Note +that infelicities in the gluster CLI preclude using names that include spaces, +which would otherwise be allowed.
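If several identities need access, a comma-separated list can be given (a sketch; the second name is a placeholder):

    gluster volume set MYVOLUME auth.ssl-allow 'Zaphod,Trillian'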

+

Enabling TLS on the Management Path

+

Management-daemon traffic is not controlled by an option. Instead, it is +controlled by the presence of a file on each machine:

+
/var/lib/glusterd/secure-access
+
+

Creating this file will cause glusterd connections made from that machine to use +TLS. Note that even clients must do this to communicate with a remote glusterd +while mounting, but not thereafter.
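For example, on a node using the default glusterd working directory, enabling this is simply:

    touch /var/lib/glusterd/secure-access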

+

Additional Options

+

The GlusterFS TLS implementation supports two additional options related to TLS +internals.

+

The first option allows the user to set the certificate depth, as mentioned +above.

+
gluster volume set MYVOLUME ssl.certificate-depth 2
+
+

Here, we're setting our certificate depth to two, as in the introductory +example. By default this value is zero, meaning that only certificates which +are directly specified in the local CA file will be accepted (i.e. no signed +certificates at all).

+

The second option allows the user to specify the set of allowed TLS ciphers.

+
gluster volume set MYVOLUME ssl.cipher-list 'HIGH:!SSLv2'
+
+

Cipher lists are negotiated between the two parties to a TLS connection so +that both sides' security needs are satisfied. In this example, we're setting +the initial cipher list to HIGH, representing ciphers that the cryptography +community still believes to be unbroken. We are also explicitly disallowing +ciphers specific to SSL version 2. The default is based on this example but +also excludes CBC-based cipher modes to provide extra mitigation against the +POODLE attack.

--- Administrator-Guide/Setting-Up-Clients/index.html: Setting Up Clients - Gluster Docs ---

Accessing Data - Setting Up GlusterFS Client

+

You can access gluster volumes in multiple ways. You can use the Gluster Native Client method for high concurrency, performance and transparent failover in GNU/Linux clients. You can also use NFS v3 to access gluster volumes. Extensive testing has been done on GNU/Linux clients and on the NFS implementation in other operating systems, such as FreeBSD and Mac OS X, as well as Windows 7 (Professional and up) and Windows Server 2003. Other NFS client implementations may also work with the gluster NFS server.

+

You can use CIFS to access volumes when using Microsoft Windows as well +as SAMBA clients. For this access method, Samba packages need to be +present on the client side.

+

Gluster Native Client

+

The Gluster Native Client is a FUSE-based client running in user space. +Gluster Native Client is the recommended method for accessing volumes +when high concurrency and high write performance is required.

+

This section introduces the Gluster Native Client and explains how to +install the software on client machines. This section also describes how +to mount volumes on clients (both manually and automatically) and how to +verify that the volume has mounted successfully.

+

Installing the Gluster Native Client

+

Before you begin installing the Gluster Native Client, you need to +verify that the FUSE module is loaded on the client and has access to +the required modules as follows:

+
    +
  1. Add the FUSE loadable kernel module (LKM) to the Linux kernel:

     modprobe fuse

  2. Verify that the FUSE module is loaded:

     # dmesg | grep -i fuse
     fuse init (API version 7.13)
+

Installing on Red Hat Package Manager (RPM) Distributions

+

To install Gluster Native Client on RPM distribution-based systems

+
    +
  1. Install required prerequisites on the client using the following command:

     sudo yum -y install openssh-server wget fuse fuse-libs openib libibverbs

  2. Ensure that TCP and UDP ports 24007 and 24008 are open on all Gluster servers. Apart
     from these ports, you need to open one port for each brick starting from port 49152
     (instead of 24009 onwards as with previous releases). The brick ports assignment scheme
     is now compliant with IANA guidelines. For example: if you have five bricks, you need
     to have ports 49152 to 49156 open.

     From Gluster-10 onwards, the brick ports will be randomized. A port is randomly selected
     within the range of base-port to max-port as defined in the glusterd.vol file and then
     assigned to the brick. For example: if you have five bricks, you need to have at least
     5 ports open within the given range of base-port and max-port. To reduce the number of
     open ports (for best security practices), one can lower the max-port value in the
     glusterd.vol file and restart glusterd for it to take effect. (A sample glusterd.vol
     snippet is sketched after this list.)

     You can use the following chains with iptables:

     sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24008 -j ACCEPT
     sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49156 -j ACCEPT

     Note
     If you already have iptable chains, make sure that the above ACCEPT rules precede the
     DROP rules. This can be achieved by providing a lower rule number than the DROP rule.

  3. Download the latest glusterfs, glusterfs-fuse, and glusterfs-rdma RPM files to each
     client. The glusterfs package contains the Gluster Native Client. The glusterfs-fuse
     package contains the FUSE translator required for mounting on client systems, and the
     glusterfs-rdma package contains the OpenFabrics verbs RDMA module for Infiniband.

     You can download the software at the GlusterFS download page.

  4. Install Gluster Native Client on the client.

     Note
     The package versions listed in the example below may not be the latest release. Please
     refer to the download page to ensure that you have the most recently released packages.

     sudo rpm -i glusterfs-3.8.5-1.x86_64
     sudo rpm -i glusterfs-fuse-3.8.5-1.x86_64
     sudo rpm -i glusterfs-rdma-3.8.5-1.x86_64
+
+
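A minimal sketch of the relevant glusterd.vol settings (the file normally lives at /etc/glusterfs/glusterd.vol; the port values here are illustrative):

    volume management
        type mgmt/glusterd
        option working-directory /var/lib/glusterd
        option base-port 49152
        option max-port  49200        # lower this to reduce the number of ports to open
    end-volume

Restart glusterd after editing the file for the change to take effect.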

Note: +The RDMA module is only required when using Infiniband.

+
+

Installing on Debian-based Distributions

+

To install Gluster Native Client on Debian-based distributions

+
    +
  1. Install OpenSSH Server on each client using the following command:

     sudo apt-get install openssh-server vim wget

  2. Download the latest GlusterFS .deb file and checksum to each client.

     You can download the software at the GlusterFS download page.

  3. For each .deb file, get the checksum (using the following command) and compare it
     against the checksum for that file in the md5sum file.

     md5sum GlusterFS_DEB_file.deb

     The md5sum of the packages is available at: GlusterFS download page

  4. Uninstall GlusterFS v3.1 (or an earlier version) from the client using the following
     command:

     sudo dpkg -r glusterfs

     (Optional) Run sudo dpkg --purge glusterfs to purge the configuration files.

  5. Install Gluster Native Client on the client using the following command:

     sudo dpkg -i GlusterFS_DEB_file

     For example:

     sudo dpkg -i glusterfs-3.8.x.deb

  6. Ensure that TCP and UDP ports 24007 and 24008 are open on all Gluster servers. Apart
     from these ports, you need to open one port for each brick starting from port 49152
     (instead of 24009 onwards as with previous releases). The brick ports assignment scheme
     is now compliant with IANA guidelines. For example: if you have five bricks, you need
     to have ports 49152 to 49156 open.

     From Gluster-10 onwards, the brick ports will be randomized. A port is randomly selected
     within the range of base_port to max_port as defined in the glusterd.vol file and then
     assigned to the brick. For example: if you have five bricks, you need to have at least
     5 ports open within the given range of base_port and max_port. To reduce the number of
     open ports (for best security practices), one can lower the max_port value in the
     glusterd.vol file and restart glusterd for it to take effect.

     You can use the following chains with iptables:

     sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24008 -j ACCEPT
     sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49156 -j ACCEPT
+
+

Note
+If you already have iptable chains, make sure that the above +ACCEPT rules precede the DROP rules. This can be achieved by +providing a lower rule number than the DROP rule.

+
+

Performing a Source Installation

+

To build and install Gluster Native Client from the source code

+
    +
  1. Create a new directory using the following commands:

     mkdir glusterfs
     cd glusterfs

  2. Download the source code.

     You can download the source at link.

  3. Extract the source code using the following command:

     tar -xvzf SOURCE-FILE

  4. Run the configuration utility using the following command:

     $ ./configure

     GlusterFS configure summary
     ===========================
     FUSE client : yes
     Infiniband verbs : yes
     epoll IO multiplex : yes
     argp-standalone : no
     fusermount : no
     readline : yes

     The configuration summary shows the components that will be built with Gluster Native
     Client.

  5. Build the Gluster Native Client software using the following commands:

     make
     make install

  6. Verify that the correct version of Gluster Native Client is installed, using the
     following command:

     glusterfs --version
+

Mounting Volumes

+

After installing the Gluster Native Client, you need to mount Gluster +volumes to access data. There are two methods you can choose:

  • Manually Mounting Volumes
  • Automatically Mounting Volumes
+

Note
+Server names selected during creation of Volumes should be resolvable +in the client machine. You can use appropriate /etc/hosts entries or +DNS server to resolve server names to IP addresses.

+
+

+

Manually Mounting Volumes

+
    +
  • To mount a volume, use the following command:
    mount -t glusterfs HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR
    +
    +
  • +
+

For example:

+
    mount -t glusterfs server1:/test-volume /mnt/glusterfs
+
+
+

Note
+The server specified in the mount command is only used to fetch +the gluster configuration volfile describing the volume name. +Subsequently, the client will communicate directly with the +servers mentioned in the volfile (which might not even include the +one used for mount).

+

If you see a usage message like "Usage: mount.glusterfs", mount +usually requires you to create a directory to be used as the mount +point. Run "mkdir /mnt/glusterfs" before you attempt to run the +mount command listed above.

+
+

Mounting Options

+

You can specify the following options when using the +mount -t glusterfs command. Note that you need to separate all options +with commas.

+
backupvolfile-server=server-name
+
+volfile-max-fetch-attempts=number of attempts
+
+log-level=loglevel
+
+log-file=logfile
+
+transport=transport-type
+
+direct-io-mode=[enable|disable]
+
+use-readdirp=[yes|no]
+
+
+

For example:

+

mount -t glusterfs -o backupvolfile-server=volfile_server2,use-readdirp=no,volfile-max-fetch-attempts=2,log-level=WARNING,log-file=/var/log/gluster.log server1:/test-volume /mnt/glusterfs

+

If backupvolfile-server option is added while mounting fuse client, +when the first volfile server fails, then the server specified in +backupvolfile-server option is used as volfile server to mount the +client.

+

In volfile-max-fetch-attempts=X option, specify the number of +attempts to fetch volume files while mounting a volume. This option is +useful when you mount a server with multiple IP addresses or when +round-robin DNS is configured for the server-name..

+

If use-readdirp is set to ON, it forces the use of readdirp +mode in fuse kernel module

+

+

Automatically Mounting Volumes

+

You can configure your system to automatically mount the Gluster volume +each time your system starts.

+

The server specified in the mount command is only used to fetch the +gluster configuration volfile describing the volume name. Subsequently, +the client will communicate directly with the servers mentioned in the +volfile (which might not even include the one used for mount).

+
    +
  • To mount a volume, edit the /etc/fstab file and add the following + line:
  • +
+

HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR glusterfs defaults,_netdev 0 0

+

For example:

+

server1:/test-volume /mnt/glusterfs glusterfs defaults,_netdev 0 0

+

Mounting Options

+

You can specify the following options when updating the /etc/fstab file. +Note that you need to separate all options with commas.

+
log-level=loglevel
+
+log-file=logfile
+
+transport=transport-type
+
+direct-io-mode=[enable|disable]
+
+use-readdirp=no
+
+

For example:

+

HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR glusterfs defaults,_netdev,log-level=WARNING,log-file=/var/log/gluster.log 0 0

+

Testing Mounted Volumes

+

To test mounted volumes

+
    +
  • Use the following command:
  • +
+

# mount

+

If the gluster volume was successfully mounted, the output of the + mount command on the client will be similar to this example:

+

server1:/test-volume on /mnt/glusterfs type fuse.glusterfs (rw,allow_other,default_permissions,max_read=131072)

+
    +
  • Use the following command:
  • +
+

# df

+

The output of df command on the client will display the aggregated + storage space from all the bricks in a volume similar to this + example:

+
  # df -h /mnt/glusterfs
+  Filesystem               Size Used Avail Use% Mounted on
+  server1:/test-volume     28T 22T 5.4T 82% /mnt/glusterfs
+
+
    +
  • Change to the directory and list the contents by entering the + following:
  • +
+
    # cd MOUNTDIR
    # ls
+
+
    +
  • For example,
  • +
+
    # cd /mnt/glusterfs
    # ls
+
+

NFS

+

You can use NFS v3 to access gluster volumes. Extensive testing has been done on GNU/Linux clients. The NFS implementations in other operating systems, such as FreeBSD and Mac OS X, as well as Windows 7 (Professional and up), Windows Server 2003, and others, may also work with the gluster NFS server implementation.

+

GlusterFS now includes network lock manager (NLM) v4. NLM enables +applications on NFSv3 clients to do record locking on files on NFS +server. It is started automatically whenever the NFS server is run.

+

You must install the nfs-common package on both servers and clients (only for Debian-based distributions).

+

This section describes how to use NFS to mount Gluster volumes (both +manually and automatically) and how to verify that the volume has been +mounted successfully.

+

Using NFS to Mount Volumes

+

You can use either of the following methods to mount Gluster volumes:

  • Manually Mounting Volumes Using NFS
  • Automatically Mounting Volumes Using NFS

Prerequisite: Install nfs-common package on both servers and clients +(only for Debian-based distribution), using the following command:

+
    sudo aptitude install nfs-common
+
+

+

Manually Mounting Volumes Using NFS

+

To manually mount a Gluster volume using NFS

+
    +
  • To mount a volume, use the following command:
    mount -t nfs -o vers=3 HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR
    +
    +
  • +
+

For example:

+
   mount -t nfs -o vers=3 server1:/test-volume /mnt/glusterfs
+
+
+

Note
+Gluster NFS server does not support UDP. If the NFS client you are +using defaults to connecting using UDP, the following message +appears:

+

requested NFS version or transport protocol is not supported.

+
+

To connect using TCP

+
    +
  • Add the following option to the mount command:
  • +
+

-o mountproto=tcp

+

For example:

+
    mount -o mountproto=tcp -t nfs server1:/test-volume /mnt/glusterfs
+
+

To mount Gluster NFS server from a Solaris client

+
    +
  • Use the following command:
    mount -o proto=tcp,vers=3 nfs://HOSTNAME-OR-IPADDRESS:38467/VOLNAME MOUNTDIR
    +
    +
  • +
+

For example:

+
    mount -o proto=tcp,vers=3 nfs://server1:38467/test-volume /mnt/glusterfs
+
+

+

Automatically Mounting Volumes Using NFS

+

You can configure your system to automatically mount Gluster volumes +using NFS each time the system starts.

+

To automatically mount a Gluster volume using NFS

+
    +
  • To mount a volume, edit the /etc/fstab file and add the following + line:
    HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR nfs defaults,_netdev,vers=3 0 0
    +
    +
  • +
+

For example,

+

server1:/test-volume /mnt/glusterfs nfs defaults,_netdev,vers=3 0 0

+
+

Note
+Gluster NFS server does not support UDP. If the NFS client you are +using defaults to connecting using UDP, the following message +appears:

+

requested NFS version or transport protocol is not supported.

+
+

To connect using TCP

+
    +
  • Add the following entry in /etc/fstab file :
    HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR nfs defaults,_netdev,mountproto=tcp 0 0
    +
    +
  • +
+

For example,

+

server1:/test-volume /mnt/glusterfs nfs defaults,_netdev,mountproto=tcp 0 0

+

To automount NFS mounts

+

Gluster supports *nix standard method of automounting NFS mounts. +Update the /etc/auto.master and /etc/auto.misc and restart the autofs +service. After that, whenever a user or process attempts to access the +directory it will be mounted in the background.
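A minimal autofs sketch (the mount point /mnt/gluster, the map file name, and the server/volume are illustrative assumptions):

    # /etc/auto.master -- map everything under /mnt through /etc/auto.misc
    /mnt    /etc/auto.misc

    # /etc/auto.misc -- mount /mnt/gluster on demand over NFSv3/TCP
    gluster    -fstype=nfs,vers=3,proto=tcp    server1:/test-volume

Restart the autofs service (for example, systemctl restart autofs) after editing these files.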

+

Testing Volumes Mounted Using NFS

+

You can confirm that Gluster directories are mounting successfully.

+

To test mounted volumes

+
    +
  • Use the mount command by entering the following:
  • +
+

# mount

+

For example, the output of the mount command on the client will + display an entry like the following:

+

server1:/test-volume on /mnt/glusterfs type nfs (rw,vers=3,addr=server1)

+
    +
  • Use the df command by entering the following:
  • +
+

# df

+

For example, the output of df command on the client will display the + aggregated storage space from all the bricks in a volume.

+
  # df -h /mnt/glusterfs
+  Filesystem              Size Used Avail Use% Mounted on
+  server1:/test-volume    28T  22T  5.4T  82%  /mnt/glusterfs
+
+
    +
  • Change to the directory and list the contents by entering the + following:
  • +
+

# cd MOUNTDIR + # ls

+

CIFS

+

You can use CIFS to access to volumes when using Microsoft Windows as +well as SAMBA clients. For this access method, Samba packages need to be +present on the client side. You can export glusterfs mount point as the +samba export, and then mount it using CIFS protocol.

+

This section describes how to mount CIFS shares on Microsoft +Windows-based clients (both manually and automatically) and how to +verify that the volume has mounted successfully.

+
+

Note

+

CIFS access using the Mac OS X Finder is not supported, however, you +can use the Mac OS X command line to access Gluster volumes using +CIFS.

+
+

Using CIFS to Mount Volumes

+

You can use either of the following methods to mount Gluster volumes:

  • Manually Mounting Volumes Using CIFS
  • Automatically Mounting Volumes Using CIFS

You can also use Samba for exporting Gluster Volumes through CIFS +protocol.

+

+

Exporting Gluster Volumes Through Samba

+

We recommend you to use Samba for exporting Gluster volumes through the +CIFS protocol.

+

To export volumes through CIFS protocol

+
    +
  1. +

    Mount a Gluster volume.

    +
  2. +
  3. +

    Setup Samba configuration to export the mount point of the Gluster + volume.

    +

    For example, if a Gluster volume is mounted on /mnt/gluster, you +must edit smb.conf file to enable exporting this through CIFS. Open +smb.conf file in an editor and add the following lines for a simple +configuration:

    +
  4. +
+
    [glustertest]
+
+    comment = For testing a Gluster volume exported through CIFS
+
+    path = /mnt/glusterfs
+
+    read only = no
+
+    guest ok = yes
+
+

Save the changes and start the smb service using your system's init scripts (/etc/init.d/smb [re]start). The above steps are needed for doing multiple mounts. If you want only a samba mount, then you need to add the following to your smb.conf:

+
    kernel share modes = no
+    kernel oplocks = no
+    map archive = no
+    map hidden = no
+    map read only = no
+    map system = no
+    store dos attributes = yes
+
+
+

Note

+

To be able mount from any server in the trusted storage pool, you must +repeat these steps on each Gluster node. For more advanced +configurations, see Samba documentation.

+
+

+

Manually Mounting Volumes Using CIFS

+

You can manually mount Gluster volumes using CIFS on Microsoft +Windows-based client machines.

+

To manually mount a Gluster volume using CIFS

+
    +
  1. Using Windows Explorer, choose Tools > Map Network Drive… from the menu. The Map
     Network Drive window appears.

  2. Choose the drive letter using the Drive drop-down list.

  3. Click Browse, select the volume to map to the network drive, and click OK.

  4. Click Finish.
+

The network drive (mapped to the volume) appears in the Computer window.

+

Alternatively, to manually mount a Gluster volume using CIFS by going to +Start > Run and entering Network path manually.

+

+

Automatically Mounting Volumes Using CIFS

+

You can configure your system to automatically mount Gluster volumes +using CIFS on Microsoft Windows-based clients each time the system +starts.

+

To automatically mount a Gluster volume using CIFS

+

The network drive (mapped to the volume) appears in the Computer window +and is reconnected each time the system starts.

+
    +
  1. Using Windows Explorer, choose Tools > Map Network Drive… from the menu. The Map
     Network Drive window appears.

  2. Choose the drive letter using the Drive drop-down list.

  3. Click Browse, select the volume to map to the network drive, and click OK.

  4. Click the Reconnect at logon checkbox.

  5. Click Finish.
+

Testing Volumes Mounted Using CIFS

+

You can confirm that Gluster directories are mounting successfully by +navigating to the directory using Windows Explorer.

--- Administrator-Guide/Setting-Up-Volumes/index.html: Setting Up Volumes - Gluster Docs ---

Setting up GlusterFS Volumes

+

A volume is a logical collection of bricks where each brick is an export +directory on a server in the trusted storage pool. +To create a new volume in your storage environment, specify the bricks +that comprise the volume. After you have created a new volume, you must +start it before attempting to mount it.
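For example, once a volume named test-volume has been created (as shown in the examples below), starting it is a single command:

    gluster volume start test-volume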

+

See Setting up Storage for how to set up bricks.

+

Volume Types

+
    +
  • +

    Volumes of the following types can be created in your storage + environment:

    +
      +
    • +

      Distributed - Distributed volumes + distribute files across the bricks in the volume. You can use distributed + volumes where the requirement is to scale storage and the redundancy is + either not important or is provided by other hardware/software layers.

      +
    • +
    • +

      Replicated – Replicated volumes replicate + files across bricks in the volume. You can use replicated volumes in + environments where high-availability and high-reliability are critical.

      +
    • +
    • +

      Distributed Replicated - + Distributed replicated volumes distribute files across replicated bricks in the + volume. You can use distributed replicated volumes in environments where the + requirement is to scale storage and high-reliability is critical. Distributed + replicated volumes also offer improved read performance in most environments.

      +
    • +
    • +

      Dispersed - Dispersed volumes are based on erasure codes, providing space-efficient protection against disk or server failures. They store an encoded fragment of the original file on each brick in a way that only a subset of the fragments is needed to recover the original file. The number of bricks that can be missing without losing access to data is configured by the administrator at volume creation time.

      +
    • +
    • +

      Distributed Dispersed - Distributed dispersed volumes distribute files across dispersed subvolumes. This has the same advantages as distributed replicated volumes, but uses disperse to store the data in the bricks.

      +
    • +
    +
  • +
+

To create a new volume

+
    +
  • +

    Create a new volume :

    +

    # gluster volume create <NEW-VOLNAME> [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport <tcp>] <NEW-BRICK> <TA-BRICK>... [force]

    +

    For example, to create a volume called test-volume consisting of +server3:/exp3 and server4:/exp4:

    +
    # gluster volume create test-volume server3:/exp3 server4:/exp4
    +Creation of test-volume has been successful
    +Please start the volume to access data.
    +
    +

    tcp is the default and currently only available transport.

    +
  • +
+

Creating Distributed Volumes

+

In a distributed volume files are spread randomly across the bricks in +the volume. Use distributed volumes where you need to scale storage and +redundancy is either not important or is provided by other +hardware/software layers.

+
+

Note: +Disk/server failure in distributed volumes can result in a serious +loss of data because directory contents are spread randomly across the +bricks in the volume.

+
+

distributed_volume

+

To create a distributed volume

+
    +
  1. +

    Create a trusted storage pool.

    +
  2. +
  3. +

    Create the distributed volume:

    +

    # gluster volume create <NEW-VOLNAME> [transport tcp] <NEW-BRICK>...

    +

    For example, to create a distributed volume with four storage +servers using tcp:

    +
    # gluster volume create test-volume server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
    +Creation of test-volume has been successful
    +Please start the volume to access data.
    +
    +

    (Optional) You can display the volume information:

    +
    # gluster volume info
    +Volume Name: test-volume
    +Type: Distribute
    +Status: Created
    +Number of Bricks: 4
    +Transport-type: tcp
    +Bricks:
    +Brick1: server1:/exp1
    +Brick2: server2:/exp2
    +Brick3: server3:/exp3
    +Brick4: server4:/exp4
    +
    +

    For example, to create a distributed volume with four storage servers, specifying the transport type explicitly:

    +
    # gluster volume create test-volume transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
    +Creation of test-volume has been successful
    +Please start the volume to access data.
    +
    +

    If the transport type is not specified, tcp is used as the +default. You can also set additional options if required, such as +auth.allow or auth.reject.

    +
    +

    Note: +Make sure you start your volumes before you try to mount them or +else client operations after the mount will hang.

    +
    +
  4. +
+

Creating Replicated Volumes

+

Replicated volumes create copies of files across multiple bricks in the +volume. You can use replicated volumes in environments where +high-availability and high-reliability are critical.

+
+

Note: The number of bricks should be equal to the replica count for a replicated volume. To protect against server and disk failures, it is recommended that the bricks of the volume are from different servers.

+
+

replicated_volume

+

To create a replicated volume

+
    +
  1. +

    Create a trusted storage pool.

    +
  2. +
  3. +

    Create the replicated volume:

    +

    # gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp] <NEW-BRICK>...

    +

    For example, to create a replicated volume with two storage servers:

    +
    # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2
    +Creation of test-volume has been successful
    +Please start the volume to access data.
    +
    +

    If the transport type is not specified, tcp is used as the +default. You can also set additional options if required, such as +auth.allow or auth.reject.

    +
    +

    Note:

    +
      +
    • +

      Make sure you start your volumes before you try to mount them or +else client operations after the mount will hang.

      +
    • +
    • +

      GlusterFS will fail to create a replicated volume if more than one brick of a replica set is present on the same peer. For example, a four-node replicated volume where more than one brick of a replica set is on the same peer:

      +
      # gluster volume create <volname> replica 4 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4
      +volume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal. Use 'force' at the end of the command if you want to override this behavior.
      +
      +
    • +
    +

    Use the force option at the end of command if you still want to create the volume with this configuration.

    +
    +
  4. +
+

Arbiter configuration for replica volumes

+

Arbiter volumes are replica 3 volumes where the 3rd brick acts as the arbiter brick. This configuration has mechanisms that prevent the occurrence of split-brain.

+

It can be created with the following command:

+
`# gluster volume create <VOLNAME> replica 3 arbiter 1 host1:brick1 host2:brick2 host3:brick3`
+
+

More information about this configuration can be found at Administrator-Guide : arbiter-volumes-and-quorum

+

Note that the arbiter configuration for replica 3 can be used to create distributed-replicate volumes as well.
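
For example, a distributed-replicate volume with two replica 3 arbiter 1 subvolumes could be created as follows (a sketch reusing the server naming from the examples above; every third brick becomes the arbiter of its replica set):

    # gluster volume create test-volume replica 3 arbiter 1 server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6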

+

Creating Distributed Replicated Volumes

+

Distributed replicated volumes distribute files across replicated bricks in the volume. You can use distributed replicated volumes in environments where the requirement is to scale storage and high-reliability is critical. Distributed replicated volumes also offer improved read performance in most environments.

+
+

Note: +The number of bricks should be a multiple of the replica count for a +distributed replicated volume. Also, the order in which bricks are +specified has a great effect on data protection. Each replica_count +consecutive bricks in the list you give will form a replica set, with +all replica sets combined into a volume-wide distribute set. To make +sure that replica-set members are not placed on the same node, list +the first brick on every server, then the second brick on every server +in the same order, and so on.
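
For example, following that ordering for three servers with two bricks each gives replica sets that never place two bricks on the same server (a sketch; the brick paths are illustrative):

    # gluster volume create test-volume replica 3 server1:/exp1 server2:/exp1 server3:/exp1 server1:/exp2 server2:/exp2 server3:/exp2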

+
+

distributed_replicated_volume

+

To create a distributed replicated volume

+
    +
  1. +

    Create a trusted storage pool.

    +
  2. +
  3. +

    Create the distributed replicated volume:

    +

    # gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp] <NEW-BRICK>...

    +

    For example, to create a four-node distributed replicated volume with a two-way mirror:

    +
    # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
    +Creation of test-volume has been successful
    +Please start the volume to access data.
    +
    +

    For example, to create a six-node distributed replicated volume with a two-way mirror:

    +
    # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6
    +Creation of test-volume has been successful
    +Please start the volume to access data.
    +
    +

    If the transport type is not specified, tcp is used as the +default. You can also set additional options if required, such as +auth.allow or auth.reject.

    +
    +

    Note: Make sure you start your volumes before you try to mount them or else client operations after the mount will hang.

    +
      +
    • GlusterFS will fail to create a distributed replicated volume if more than one brick of a replica set is present on the same peer. For example, for a four-node distributed replicated volume where more than one brick of a replica set is present on the same peer:
      # gluster volume create <volname> replica 2 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4
      +volume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal. Use 'force' at the end of the command if you want to override this behavior.
      +
      +
    • +
    +

    Use the force option at the end of command if you want to create the volume in this case.

    +
    +
  4. +
+

Creating Dispersed Volumes

+

Dispersed volumes are based on erasure codes. They stripe the encoded data of files, with some redundancy added, across multiple bricks in the volume. You can use dispersed volumes to have a configurable level of reliability with minimum space waste.

+

Redundancy

+

Each dispersed volume has a redundancy value defined when the volume is +created. This value determines how many bricks can be lost without +interrupting the operation of the volume. It also determines the amount of +usable space of the volume using this formula:

+
<Usable size> = <Brick size> * (#Bricks - Redundancy)
+
+

All bricks of a disperse set should have the same capacity, otherwise, when +the smallest brick becomes full, no additional data will be allowed in the +disperse set.

+

It's important to note that a configuration with 3 bricks and redundancy 1 will have less usable space (66.7% of the total physical space) than a configuration with 10 bricks and redundancy 1 (90%). However, the first one will be safer than the second one (roughly, the probability of failure of the second configuration is more than 4.5 times bigger than that of the first one).

+

For example, a dispersed volume composed of 6 bricks of 4TB and a redundancy +of 2 will be completely operational even with two bricks inaccessible. However +a third inaccessible brick will bring the volume down because it won't be +possible to read or write to it. The usable space of the volume will be equal +to 16TB.
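
As a quick check, the usable size from the formula above can be computed in the shell for this 6-brick, redundancy 2 example (an illustrative sketch; brick sizes are in TB):

    # echo "$((4 * (6 - 2))) TB usable"
    16 TB usable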

+

The implementation of erasure codes in GlusterFS limits the redundancy to a value smaller than #Bricks / 2 (or equivalently, redundancy * 2 < #Bricks). Having a redundancy equal to half of the number of bricks would be almost equivalent to a replica-2 volume, and a replicated volume would probably perform better in that case.

+

Optimal volumes

+

One of the worst things erasure codes have in terms of performance is the RMW (Read-Modify-Write) cycle. Erasure codes operate on blocks of a certain size and cannot work with smaller ones. This means that if a user issues a write of a portion of a file that doesn't fill a full block, it needs to read the remaining portion from the current contents of the file, merge them, compute the updated encoded block and, finally, write the resulting data.

+

This adds latency, reducing performance when this happens. Some GlusterFS +performance xlators can help to reduce or even eliminate this problem for +some workloads, but it should be taken into account when using dispersed +volumes for a specific use case.

+

The current implementation of dispersed volumes uses blocks of a size that depends on the number of bricks and redundancy: 512 * (#Bricks - redundancy) bytes. This value is also known as the stripe size.

+

Using combinations of #Bricks/redundancy that give a power of two for the stripe size will make the dispersed volume perform better in most workloads, because it's more typical to write information in blocks that are multiples of two (for example databases, virtual machines and many applications).

+

These combinations are considered optimal.

+

For example, a configuration with 6 bricks and redundancy 2 will have a stripe +size of 512 * (6 - 2) = 2048 bytes, so it's considered optimal. A configuration +with 7 bricks and redundancy 2 would have a stripe size of 2560 bytes, needing +a RMW cycle for many writes (of course this always depends on the use case).
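
A quick shell check of the stripe-size formula for these two configurations (an illustrative sketch, not a gluster command):

    # echo $((512 * (6 - 2)))
    2048
    # echo $((512 * (7 - 2)))
    2560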

+

To create a dispersed volume

+
    +
  1. +

    Create a trusted storage pool.

    +
  2. +
  3. +

    Create the dispersed volume:

    +

    # gluster volume create <NEW-VOLNAME> [disperse [<count>]] [redundancy <count>] [transport tcp] <NEW-BRICK>...

    +

    A dispersed volume can be created by specifying the number of bricks in a +disperse set, by specifying the number of redundancy bricks, or both.

    +

    If disperse is not specified, or the <count> is missing, the entire volume will be treated as a single disperse set composed of all bricks enumerated in the command line.

    +

    If redundancy is not specified, it is computed automatically to be the +optimal value. If this value does not exist, it's assumed to be '1' and a +warning message is shown:

    +
    # gluster volume create test-volume disperse 4 server{1..4}:/bricks/test-volume
    +There isn't an optimal redundancy value for this configuration. Do you want to create the volume with redundancy 1 ? (y/n)
    +
    +

    In all cases where redundancy is automatically computed and it's not +equal to '1', a warning message is displayed:

    +
    # gluster volume create test-volume disperse 6 server{1..6}:/bricks/test-volume
    +The optimal redundancy for this configuration is 2. Do you want to create the volume with this value ? (y/n)
    +
    +

    redundancy must be greater than 0, and the total number of bricks must +be greater than 2 * redundancy. This means that a dispersed volume must +have a minimum of 3 bricks.
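
    For example, to specify both the disperse count and the redundancy explicitly and avoid the interactive prompts shown above (a sketch using the same server naming; adjust the brick paths to your environment):

    # gluster volume create test-volume disperse 6 redundancy 2 server{1..6}:/bricks/test-volume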

    +

    If the transport type is not specified, tcp is used as the default. You +can also set additional options if required, like in the other volume +types.

    +
    +

    Note:

    +
      +
    • +

      Make sure you start your volumes before you try to mount them or +else client operations after the mount will hang.

      +
    • +
    • +

      GlusterFS will fail with a warning to create a dispersed volume if more than one brick of a disperse set is present on the same peer.

      +
      # gluster volume create <volname> disperse 3 server1:/brick{1..3}
      +volume create: <volname>: failed: Multiple bricks of a disperse volume are present on the same server. This setup is not optimal. Bricks should be on different nodes to have best fault tolerant configuration. Use 'force' at the end of the command if you want to override this behavior.
      +
      +
    • +
    +
    +
  4. +
+

Creating Distributed Dispersed Volumes

+

Distributed dispersed volumes are the equivalent to distributed replicated +volumes, but using dispersed subvolumes instead of replicated ones.

+

To create a distributed dispersed volume

+
    +
  1. +

    Create a trusted storage pool.

    +
  2. +
  3. +

    Create the distributed dispersed volume:

    +

    # gluster volume create <NEW-VOLNAME> disperse <count> [redundancy <count>] [transport tcp] <NEW-BRICK>...

    +

    To create a distributed dispersed volume, the disperse keyword and <count> are mandatory, and the number of bricks specified in the command line must be a multiple of the disperse count.

    +

    redundancy is exactly the same as in the dispersed volume.

    +

    If the transport type is not specified, tcp is used as the default. You +can also set additional options if required, like in the other volume +types.

    +
    +

    Note:

    +
      +
    • +

      Make sure you start your volumes before you try to mount them or +else client operations after the mount will hang.

      +
    • +
    • +

      For distributed dispersed volumes, bricks can be hosted on the same node if they belong to different subvolumes.

      +
      # gluster volume create <volname> disperse 3 server1:/br1 server2:/br1 server3:/br1 server1:/br2 server2:/br2 server3:/br2
      +
      +

      volume create: <volname>: success: please start the volume to access data

      +
    • +
    +
    +
  4. +
+

Starting Volumes

+

You must start your volumes before you try to mount them.

+

To start a volume

+
    +
  • +

    Start a volume:

    +

    # gluster volume start <VOLNAME> [force]

    +

    For example, to start test-volume:

    +
    # gluster volume start test-volume
    +Starting test-volume has been successful
    +
    +
  • +
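
Once a volume is started, you can confirm that its brick processes are online before mounting it, for example:

    # gluster volume status test-volume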

Split brain and the ways to deal with it

+

Split brain:

+

Split brain is a situation where two or more replicated copies of a file become divergent. When a file is in split brain, there is an inconsistency in either the data or the metadata of the file amongst the bricks of a replica, and there is not enough information to authoritatively pick a copy as being pristine and heal the bad copies, despite all bricks being up and online. For a directory, there is also an entry split brain, where a file inside it can have a different gfid/file-type across the bricks of a replica. Split brain can happen mainly because of 2 reasons:

+
    +
  • Due to network disconnect, where a client temporarily loses connection to the bricks. For example:
  • +
+
+
    +
  1. +

    There is a replica pair of 2 bricks, brick1 on server1 and brick2 on server2.

    +
  2. +
  3. +

    Client1 loses connection to brick2 and client2 loses connection to brick1 due to network split.

    +
  4. +
  5. +

    Writes from client1 go to brick1 and writes from client2 go to brick2, which is nothing but split-brain.

    +
  6. +
+
+
    +
  • Gluster brick processes going down or returning an error. For example:
  • +
+
+
    +
  1. +

    Server1 is down and server2 is up: Writes happen on server 2.

    +
  2. +
  3. +

    Server1 comes up, server2 goes down (heal has not happened / data on server2 is not replicated on server1): Writes happen on server1.

    +
  4. +
  5. +

    Server2 comes up: Both server1 and server2 have data independent of each other.

    +
  6. +
+
+

If we use a replica 2 volume, it is not possible to prevent split-brain without losing availability.

+

Ways to deal with split brain:

+

In glusterfs there are ways to resolve split brain. You can see the detailed description of how to resolve a split-brain here. Moreover, there are ways to reduce the chances of ending up in split-brain situations. They are:
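
For example, assuming a volume named test-volume, the files currently in split-brain can be listed with:

    # gluster volume heal test-volume info split-brain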

+
    +
  1. Replica 3 volume
  2. +
  3. Arbiter volume
  4. +
+

Both of these use the client-quorum option of glusterfs to avoid the split-brain situations.

+

Client quorum:

+

This is a feature implemented in Automatic File Replication (AFR here on) module, to prevent split-brains in the I/O path for replicate/distributed-replicate volumes. By default, if the client-quorum is not met for a particular replica subvol, it becomes read-only. The other subvols (in a dist-rep volume) will still have R/W access. Here you can see more details about client-quorum.

+

Client quorum in replica 2 volumes:

+

In a replica 2 volume it is not possible to achieve high availability and consistency at the same time without sacrificing tolerance to partition. If we set the client-quorum option to auto, then the first brick must always be up, irrespective of the status of the second brick. If only the second brick is up, the subvolume becomes read-only. If the quorum-type is set to fixed and the quorum-count is set to 1, then we may end up in split brain: brick1 is up and brick2 is down, quorum is met and the write happens on brick1; brick1 goes down and brick2 comes up (no heal has happened), quorum is met and the write happens on brick2; brick1 comes up, quorum is met, but both bricks now have independent writes, which is split-brain. To avoid this we have to set the quorum-count to 2, which costs availability: even if we have one replica brick up and running, the quorum is not met and we end up seeing EROFS.
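
For reference, the client-quorum behaviour described above is controlled through the cluster.quorum-type and cluster.quorum-count volume options (see Tuning Volume Options). A sketch, assuming a volume named test-volume:

    # gluster volume set test-volume cluster.quorum-type fixed
    # gluster volume set test-volume cluster.quorum-count 2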

+

1. Replica 3 volume:

+

When we create a replicated or distributed replicated volume with replica count 3, the cluster.quorum-type option is set to auto by default. That means at least 2 bricks should be up and running to satisfy the quorum and allow the writes. This is the recommended setting for a replica 3 volume and this should not be changed. Here is how it prevents files from ending up in split brain:

+

B1, B2, and B3 are the 3 bricks of a replica 3 volume.

+
    +
  1. B1 & B2 are up and B3 is down. Quorum is met and write happens on B1 & B2.
  2. +
  3. B3 comes up and B2 is down. Quorum is met and write happens on B1 & B3.
  4. +
  5. B2 comes up and B1 goes down. Quorum is met. But when a write request comes, AFR sees that B2 & B3 are blaming each other (B2 says that some writes are pending on B3 and B3 says that some writes are pending on B2), therefore the write is not allowed and is failed with EIO.
  6. +
+

Command to create a replica 3 volume:

+
gluster volume create <volname> replica 3 host1:brick1 host2:brick2 host3:brick3
+
+

2. Arbiter volume:

+

Arbiter offers the sweet spot between replica 2 and replica 3, where the user wants the split-brain protection offered by replica 3 but does not want to invest in 3x storage space. An arbiter volume is also a replica 3 volume where the third brick of the replica is automatically configured as an arbiter node. This means that the third brick stores only the file name and metadata, but not any data. This helps in avoiding split brain while providing the same level of consistency as a normal replica 3 volume.

+

Command to create an arbiter volume:

+
gluster volume create <volname> replica 3 arbiter 1 host1:brick1 host2:brick2 host3:brick3
+
+

The only difference in the command is that we need to add one more keyword, arbiter 1, after the replica count. Since it is also a replica 3 volume, the cluster.quorum-type option is set to auto by default and at least 2 bricks should be up to satisfy the quorum and allow writes. Since the arbiter brick has only the name and metadata of the files, there are some more checks to guarantee consistency. Arbiter works as follows:

+
    +
  1. Clients take full file locks while writing (replica 3 takes range locks).
  2. +
  3. If 2 bricks are up and if one of them is the arbiter, and it blames the other up brick, then all FOPs will fail with ENOTCONN (Transport endpoint is not connected). If the arbiter doesn't blame the other brick, FOPs will be allowed to proceed.
  4. +
  5. If 2 bricks are up and the arbiter is down, then FOPs will be allowed.
  6. +
  7. If only one brick is up, then client-quorum is not met and the volume becomes read-only (EROFS).
  8. +
  9. In all cases, if there is only one source before the FOP is initiated and if the FOP fails on that source, the application will receive ENOTCONN.
  10. +
+

You can find more details on arbiter here.

+

Differences between replica 3 and arbiter volumes:

+
    +
  1. In case of a replica 3 volume, we store the entire file in all the bricks and it is recommended to have bricks of same size. But in case of arbiter, since we do not store data, the size of the arbiter brick is comparatively lesser than the other bricks.
  2. +
  3. Arbiter is a state between replica 2 and replica 3 volumes. If only the arbiter and one other brick are up, and the arbiter brick blames the other brick, then we cannot proceed with the FOPs.
  4. +
  5. Replica 3 gives high availability compared to arbiter, because unlike in arbiter, replica 3 has a full copy of the data in all 3 bricks.
  6. +

Managing the glusterd Service

+

After installing GlusterFS, you must start the glusterd service. The glusterd service serves as the Gluster elastic volume manager, overseeing glusterfs processes and coordinating dynamic volume operations, such as adding and removing volumes across multiple storage servers non-disruptively.

+

This section describes how to start the glusterd service in the +following ways:

+ +
+

Note: You must start glusterd on all GlusterFS servers.
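
On distributions with systemd (covered below), you can verify that glusterd is running on a server with, for example:

    systemctl status glusterd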

+
+

Distributions with systemd

+

+

Starting and stopping glusterd manually

+
    +
  • +

    To start glusterd manually:

    +
    systemctl start glusterd
    +
    +
  • +
  • +

    To stop glusterd manually:

    +
    systemctl stop glusterd
    +
    +
  • +
+

+

Starting glusterd automatically

+
    +
  • +

    To enable the glusterd service and start it if stopped:

    +
    systemctl enable --now glusterd
    +
    +
  • +
  • +

    To disable the glusterd service and stop it if started:

    +
    systemctl disable --now glusterd
    +
    +
  • +
+

Distributions without systemd

+

+

Starting and stopping glusterd manually

+

This section describes how to start and stop glusterd manually

+
    +
  • +

    To start glusterd manually, enter the following command:

    +
    /etc/init.d/glusterd start
    +
    +
  • +
  • +

    To stop glusterd manually, enter the following command:

    +
    /etc/init.d/glusterd stop
    +
    +
  • +
+

+

Starting glusterd Automatically

+

This section describes how to configure the system to automatically +start the glusterd service every time the system boots.

+

Red Hat and Fedora distributions

+

To configure Red Hat-based systems to automatically start the glusterd +service every time the system boots, enter the following from the +command line:

+
chkconfig glusterd on
+
+

Debian and derivatives like Ubuntu

+

To configure Debian-based systems to automatically start the glusterd +service every time the system boots, enter the following from the +command line:

+
update-rc.d glusterd defaults
+
+

Systems Other than Red Hat and Debian

+

To configure systems other than Red Hat or Debian to automatically start the glusterd service every time the system boots, add the following entry to the /etc/rc.local file:

+
echo "glusterd" >> /etc/rc.local
+

Managing Trusted Storage Pools

+

Overview

+

A trusted storage pool (TSP) is a trusted network of storage servers. Before you can configure a GlusterFS volume, you must create a trusted storage pool of the storage servers that will provide bricks to the volume by peer probing the servers. The servers in a TSP are peers of each other.

+

After installing Gluster on your servers and before creating a trusted storage pool, +each server belongs to a storage pool consisting of only that server.

+ +

Before you start:

+
    +
  • +

    The servers used to create the storage pool must be resolvable by hostname.

    +
  • +
  • +

    The glusterd daemon must be running on all storage servers that you + want to add to the storage pool. See Managing the glusterd Service for details.

    +
  • +
  • +

    The firewall on the servers must be configured to allow access to port 24007 (see the example after this list).

    +
  • +
+
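
For example, on servers using firewalld, port 24007 can be opened as follows (an illustrative sketch; adapt it to the firewall actually used on your servers):

    firewall-cmd --zone=public --add-port=24007/tcp --permanent
    firewall-cmd --reload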

The following commands were run on a TSP consisting of 3 servers - server1, server2, +and server3.

+

+

Adding Servers

+

To add a server to a TSP, peer probe it from a server already in the pool.

+
    # gluster peer probe <server>
+
+

For example, to add a new server4 to the cluster described above, probe it from one of the other servers:

+
    server1#  gluster peer probe server4
+    Probe successful
+
+

Verify the peer status from the first server (server1):

+
    server1# gluster peer status
+    Number of Peers: 3
+
+    Hostname: server2
+    Uuid: 5e987bda-16dd-43c2-835b-08b7d55e94e5
+    State: Peer in Cluster (Connected)
+
+    Hostname: server3
+    Uuid: 1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7
+    State: Peer in Cluster (Connected)
+
+    Hostname: server4
+    Uuid: 3e0cabaa-9df7-4f66-8e5d-cbc348f29ff7
+    State: Peer in Cluster (Connected)
+
+

+

Listing Servers

+

To list all nodes in the TSP:

+
    server1# gluster pool list
+    UUID                                    Hostname        State
+    d18d36c5-533a-4541-ac92-c471241d5418    localhost       Connected
+    5e987bda-16dd-43c2-835b-08b7d55e94e5    server2         Connected
+    1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7    server3         Connected
+    3e0cabaa-9df7-4f66-8e5d-cbc348f29ff7    server4         Connected
+
+

+

Viewing Peer Status

+

To view the status of the peers in the TSP:

+
    server1# gluster peer status
+    Number of Peers: 3
+
+    Hostname: server2
+    Uuid: 5e987bda-16dd-43c2-835b-08b7d55e94e5
+    State: Peer in Cluster (Connected)
+
+    Hostname: server3
+    Uuid: 1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7
+    State: Peer in Cluster (Connected)
+
+    Hostname: server4
+    Uuid: 3e0cabaa-9df7-4f66-8e5d-cbc348f29ff7
+    State: Peer in Cluster (Connected)
+
+

+

Removing Servers

+

To remove a server from the TSP, run the following command from another server in the pool:

+
    # gluster peer detach <server>
+
+

For example, to remove server4 from the trusted storage pool:

+
    server1# gluster peer detach server4
+    Detach successful
+
+

Verify the peer status:

+
    server1# gluster peer status
+    Number of Peers: 2
+
+    Hostname: server2
+    Uuid: 5e987bda-16dd-43c2-835b-08b7d55e94e5
+    State: Peer in Cluster (Connected)
+
+    Hostname: server3
+    Uuid: 1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7
+    State: Peer in Cluster (Connected)
+

Thin Arbiter volumes in gluster

+

Thin Arbiter is a new type of quorum node where the granularity of what is good and what is bad data is coarser compared to the traditional arbiter brick. In this type of volume, quorum is taken into account at a brick level rather than on a per-file basis. If there is even one file that is marked bad (i.e. needs healing) on a data brick, that brick is considered bad for all files as a whole. So, even for a different file, if the write fails on the other data brick but succeeds on this 'bad' brick, we will return failure for the write.

+ +

Why Thin Arbiter?

+

This is a solution for handling stretch-cluster kinds of workloads, but it can be used for regular workloads as well in case users are satisfied with this kind of quorum in comparison to arbiter/3-way-replication. The thin arbiter node can be placed outside of the trusted storage pool, i.e. the thin arbiter is the "stretched" node in the cluster. This node can be placed on the cloud or anywhere, even if that connection has high latency. As this node takes part only in case of failure (or when a brick is down) and to decide the quorum, it does not impact performance in normal cases. The cost to perform any file operation would be lower than with an arbiter if everything is fine. I/O will only go to the data bricks and goes to the thin arbiter only in the case of a first failure, until heal completes.

+

Setting UP Thin Arbiter Volume

+

The command to run the thin-arbiter process on the node:

+
/usr/local/sbin/glusterfsd -N --volfile-id ta-vol -f /var/lib/glusterd/vols/thin-arbiter.vol --brick-port 24007 --xlator-option ta-vol-server.transport.socket.listen-port=24007
+
+

Creating a thin arbiter replica 2 volume:

+
glustercli volume create <volname> --replica 2 <host1>:<brick1> <host2>:<brick2> --thin-arbiter <quorum-host>:<path-to-store-replica-id-file>
+
+

For example:

+
glustercli volume create testvol --replica 2 server{1..2}:/bricks/brick-{1..2} --thin-arbiter server-3:/bricks/brick_ta --force
+volume create: testvol: success: please start the volume to access data
+
+

How Thin Arbiter works

+

There will be only one process running on the thin arbiter node, which will be used to update the replica id file for all replica pairs across all volumes. The replica id file contains the information of good and bad data bricks in the form of xattrs. Each replica pair will use its respective replica-id file, which is going to be created during mount.

+
    +
  1. +

    Read Transactions: + Reads are allowed when quorum is met. i.e.

    +
  2. +
  3. +

    When all data bricks and thin arbiter are up: Perform lookup on data bricks to figure out good/bad bricks and + serve content from the good brick.

    +
  4. +
  5. When one brick is up: Fail FOP with EIO.
  6. +
  7. +

    Two bricks are up: + If two data bricks are up, lookup is done on data bricks to figure out good/bad bricks and content will be served + from the good brick. One lookup is enough to figure out good/bad copy of that file and keep this in inode context. + If one data brick and thin arbiter brick are up, xattrop is done on thin arbiter to get information of source (good) + brick. If the data brick, which is UP, has also been marked as source brick on thin arbiter, lookup on this file is + done on the data brick to check if the file is really healthy or not. If the file is good, data will be served from + this brick else an EIO error would be returned to user.

    +
  8. +
  9. +

    Write transactions: Thin arbiter doesn’t participate in I/O; the transaction will choose to wind operations on the thin-arbiter brick to make sure the necessary metadata is kept up-to-date in case of failures. An operation failure will lead to updating the replica-id file on the thin arbiter with source/sink information in the xattrs, just as it happens in AFR.

    +
  10. +

Trash Translator

+

Trash translator will allow users to access deleted or truncated files. Every brick will maintain a hidden .trashcan directory, which will be used to store the files deleted or truncated from the respective brick. The aggregate of all those .trashcan directories can be accessed from the mount point. To avoid name collisions, a timestamp is appended to the original file name while it is being moved to the trash directory.

+

Implications and Usage

+

Apart from the primary use-case of accessing files deleted or truncated by the user, the trash translator can be helpful for internal operations such as self-heal and rebalance. During self-heal and rebalance it is possible to lose crucial data. In those circumstances, the trash translator can assist in the recovery of the lost data. The trash translator is designed to intercept unlink, truncate and ftruncate fops, store a copy of the current file in the trash directory, and then perform the fop on the original file. For the internal operations, the files are stored under the 'internal_op' folder inside the trash directory.

+

Volume Options

+
    +
  • gluster volume set <VOLNAME> features.trash <on/off>
  • +
+

This command can be used to enable a trash translator in a volume. If set to on, a trash directory will be created in every brick inside the volume during the volume start command. By default, a translator is loaded during volume start but remains non-functional. Disabling trash with the help of this option will not remove the trash directory or even its contents from the volume.

+
    +
  • gluster volume set <VOLNAME> features.trash-dir <name>
  • +
+

This command is used to reconfigure the trash directory to a user-specified name. The argument is a valid directory name. The directory will be created inside every brick under this name. If not specified by the user, the trash translator will create the trash directory with the default name “.trashcan”. This can be used only when the trash-translator is on.

+
    +
  • gluster volume set <VOLNAME> features.trash-max-filesize <size>
  • +
+

This command can be used to filter files entering the trash directory based on their size. Files above trash-max-filesize are deleted/truncated directly. The value for size may be followed by multiplicative suffixes KB (=1024 bytes), MB (=1024*1024 bytes), and GB (=1024*1024*1024 bytes). The default size is set to 5MB.
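
For example, to raise the limit on the test volume used later in this page (the 200MB value is illustrative):

    gluster volume set test features.trash-max-filesize 200MB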

+
    +
  • gluster volume set <VOLNAME> features.trash-eliminate-path <path1> [ , <path2> , . . . ]
  • +
+

This command can be used to set the eliminate pattern for the trash translator. Files residing under this pattern will not be moved to the trash directory during deletion/truncation. The path must be a valid one present in the volume.

+
    +
  • gluster volume set <VOLNAME> features.trash-internal-op <on/off>
  • +
+

This command can be used to enable trash for internal operations like self-heal and re-balance. By default set to off.

+

Sample usage

+

The following steps illustrate a simple scenario of deleting a file from a directory.

+
    +
  1. +

    Create a simple distributed volume and start it.

    +
    gluster volume create test rhs:/home/brick
    +gluster volume start test
    +
    +
  2. +
  3. +

    Enable trash translator

    +
    gluster volume set test features.trash on
    +
    +
  4. +
  5. +

    Mount glusterfs volume via native client as follows.

    +
    mount -t glusterfs  rhs:test /mnt
    +
    +
  6. +
  7. +

    Create a directory and file in the mount.

    +
    mkdir mnt/dir
    +echo abc > mnt/dir/file
    +
    +
  8. +
  9. +

    Delete the file from the mount.

    +
    rm mnt/dir/file -rf
    +
    +
  10. +
  11. +

    Check inside the trash directory.

    +
    ls mnt/.trashcan
    +
    +
  12. +
+

We can find the deleted file inside the trash directory with a timestamp appended to its filename.

+

For example,

+
mount -t glusterfs rh-host:/test /mnt/test
+mkdir /mnt/test/abc
+touch /mnt/test/abc/file
+rm -f /mnt/test/abc/file
+
+ls /mnt/test/abc
+
+ls /mnt/test/.trashcan/abc/
+
+

You will see file2014-08-21_123400 as the output of the last ls command.

+

Points to be remembered

+
    +
  • As soon as the volume is started, the trash directory will be created inside the volume and will be visible through the mount. Disabling the trash will not have any impact on its visibility from the mount.
  • +
  • Even though deletion of the trash directory is not permitted, issuing a delete on it will remove the currently residing trash contents, leaving only an empty trash directory.
  • +
+

Known issue

+

Since the trash translator resides on the server side, higher translators like AFR and DHT are unaware of the rename and truncate operations done by this translator, which eventually move the files to the trash directory. Unless and until a complete-path-based lookup comes on trashed files, those may not be visible from the mount.

Tuning Volume Options

+

You can tune volume options, as needed, while the cluster is online and +available.

+
+

Note

+

It is recommended to set the server.allow-insecure option to ON if there are too many bricks in each volume or if there are too many services which have already utilized all the privileged ports in the system. Turning this option ON allows the server to accept messages from insecure (non-privileged) ports. So, use this option only if your deployment requires it.
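
If your deployment does require it, the option can be enabled per volume, for example:

    # gluster volume set test-volume server.allow-insecure on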

+
+

Tune volume options using the following command:

+

# gluster volume set <VOLNAME> <OPT-NAME> <OPT-VALUE>

+

For example, to specify the performance cache size for test-volume:

+
# gluster volume set test-volume performance.cache-size 256MB
+Set volume successful
+
+

You can view the changed volume options using command:

+

# gluster volume info

+

The following table lists the volume options along with their descriptions and default values:

+
+

Note

+

The default options given here are subject to modification at any +given time and may not be the same for all versions.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TypeOptionDescriptionDefault ValueAvailable Options
auth.allowIP addresses of the clients which should be allowed to access the volume.* (allow all)Valid IP address which includes wild card patterns including *, such as 192.168.1.*
auth.rejectIP addresses of the clients which should be denied to access the volume.NONE (reject none)Valid IP address which includes wild card patterns including *, such as 192.168.2.*
Clustercluster.self-heal-window-sizeSpecifies the maximum number of blocks per file on which self-heal would happen simultaneously.10 - 1024 blocks
cluster.data-self-heal-algorithmSpecifies the type of self-heal. If you set the option as "full", the entire file is copied from source to destinations. If the option is set to "diff" the file blocks that are not in sync are copied to destinations. Reset uses a heuristic model. If the file does not exist on one of the subvolumes, or a zero-byte file exists (created by entry self-heal) the entire content has to be copied anyway, so there is no benefit from using the "diff" algorithm. If the file size is about the same as page size, the entire file can be read and written with a few operations, which will be faster than "diff" which has to read checksums and then read and write.resetfull/diff/reset
cluster.min-free-diskSpecifies the percentage of disk space that must be kept free. Might be useful for non-uniform bricks10%Percentage of required minimum free disk space
cluster.min-free-inodesSpecifies when system has only N% of inodes remaining, warnings starts to appear in log files10%Percentage of required minimum free inodes
cluster.stripe-block-sizeSpecifies the size of the stripe unit that will be read from or written to.128 KB (for all files)size in bytes
cluster.self-heal-daemonAllows you to turn-off proactive self-heal on replicatedOnOn/Off
cluster.ensure-durabilityThis option makes sure the data/metadata is durable across abrupt shutdown of the brick.OnOn/Off
cluster.lookup-unhashedThis option does a lookup through all the sub-volumes, in case a lookup didn’t return any result from the hashed subvolume. If set to OFF, it does not do a lookup on the remaining subvolumes.onauto, yes/no, enable/disable, 1/0, on/off
cluster.lookup-optimizeThis option enables the optimization of -ve lookups, by not doing a lookup on non-hashed subvolumes for files, in case the hashed subvolume does not return any result. This option disregards the lookup-unhashed setting, when enabled.onon/off
cluster.randomize-hash-range-by-gfidAllows to use gfid of directory to determine the subvolume from which hash ranges are allocated starting with 0. Note that we still use a directory/file’s name to determine the subvolume to which it hashesoffon/off
cluster.rebal-throttleSets the maximum number of parallel file migrations allowed on a node during the rebalance operation. The default value is normal and allows 2 files to be migrated at a time. Lazy will allow only one file to be migrated at a time and aggressive will allow maxof[(((processing units) - 4) / 2), 4]normallazy/normal/aggressive
cluster.background-self-heal-countSpecifies the number of per client self-heal jobs that can perform parallel heals in the background.80-256
cluster.heal-timeoutTime interval for checking the need to self-heal in self-heal-daemon6005-(signed-int)
cluster.eager-lockIf eager-lock is off, locks release immediately after file operations complete, improving performance for some operations, but reducing access efficiencyonon/off
cluster.quorum-typeIf value is “fixed” only allow writes if quorum-count bricks are present. If value is “auto” only allow writes if more than half of bricks, or exactly half including the first brick, are presentnonenone/auto/fixed
cluster.quorum-countIf quorum-type is “fixed” only allow writes if this many bricks are present. Other quorum types will OVERWRITE this valuenull1-(signed-int)
cluster.heal-wait-queue-lengthSpecifies the number of heals that can be queued for the parallel background self heal jobs.1280-10000
cluster.favorite-child-policySpecifies which policy can be used to automatically resolve split-brains without user intervention. “size” picks the file with the biggest size as the source. “ctime” and “mtime” pick the file with the latest ctime and mtime respectively as the source. “majority” picks a file with identical mtime and size in more than half the number of bricks in the replica.nonenone/size/ctime/mtime/majority
cluster.use-anonymous-inodeSetting this option heals directory renames efficientlynono/yes
Dispersedisperse.eager-lockIf eager-lock is on, the lock remains in place either until lock contention is detected, or for 1 second in order to check if there is another request for that file from the same client. If eager-lock is off, locks release immediately after file operations complete, improving performance for some operations, but reducing access efficiency.onon/off
disperse.other-eager-lockThis option is equivalent to the disperse.eager-lock option but applicable only for non regular files. When multiple clients access a particular directory, disabling disperse.other-eager-lockoption for the volume can improve performance for directory access without compromising performance of I/O's for regular files.offon/off
disperse.shd-max-threadsSpecifies the number of entries that can be self healed in parallel on each disperse subvolume by self-heal daemon.11 - 64
disperse.shd-wait-qlengthSpecifies the number of entries that must be kept in the dispersed subvolume's queue for self-heal daemon threads to take up as soon as any of the threads are free to heal. This value should be changed based on how much memory self-heal daemon process can use for keeping the next set of entries that need to be healed.10241 - 655536
disprse.eager-lock-timeoutMaximum time (in seconds) that a lock on an inode is kept held if no new operations on the inode are received.11-60
disperse.other-eager-lock-timeoutIt’s equivalent to eager-lock-timeout option but for non regular files.11-60
disperse.background-healsThis option can be used to control number of parallel heals running in background.80-256
disperse.heal-wait-qlengthThis option can be used to control number of heals that can wait1280-65536
disperse.read-policyinode-read fops happen only on ‘k’ number of bricks in n=k+m disperse subvolume. ‘round-robin’ selects the read subvolume using round-robin algo. ‘gfid-hash’ selects read subvolume based on hash of the gfid of that file/directory.gfid-hashround-robin/gfid-hash
disperse.self-heal-window-sizeMaximum number blocks(128KB) per file for which self-heal process would be applied simultaneously.11-1024
disperse.optimistic-change-logThis option Set/Unset dirty flag for every update fop at the start of the fop. If OFF, this option impacts performance of entry or metadata operations as it will set dirty flag at the start and unset it at the end of ALL update fop. If ON and all the bricks are good, dirty flag will be set at the start only for file fops, For metadata and entry fops dirty flag will not be set at the start This does not impact performance for metadata operations and entry operation but has a very small window to miss marking entry as dirty in case it is required to be healed.onon/off
disperse.parallel-writesThis controls if writes can be wound in parallel as long as it doesn’t modify same stripesonon/off
disperse.stripe-cacheThis option will keep the last stripe of write fop in memory. If next write falls in this stripe, we need not to read it again from backend and we can save READ fop going over the network. This will improve performance, specially for sequential writes. However, this will also lead to extra memory consumption, maximum (cache size * stripe size) Bytes per open file40-10
disperse.quorum-countThis option can be used to define how many successes on the bricks constitute a success to the application. This count should be in the range [disperse-data-count, disperse-count] (inclusive)00-(signedint)
disperse.use-anonymous-inodeSetting this option heals renames efficientlyoffon/off
Loggingdiagnostics.brick-log-levelChanges the log-level of the bricksINFODEBUG/WARNING/ERROR/CRITICAL/NONE/TRACE
diagnostics.client-log-levelChanges the log-level of the clients.INFODEBUG/WARNING/ERROR/CRITICAL/NONE/TRACE
diagnostics.brick-sys-log-levelDepending on the value defined for this option, log messages at and above the defined level are generated in the syslog and the brick log files.CRITICALINFO/WARNING/ERROR/CRITICAL
diagnostics.client-sys-log-levelDepending on the value defined for this option, log messages at and above the defined level are generated in the syslog and the client log files.CRITICALINFO/WARNING/ERROR/CRITICAL
diagnostics.brick-log-formatAllows you to configure the log format to log either with a message id or without one on the brick.with-msg-idno-msg-id/with-msg-id
diagnostics.client-log-formatAllows you to configure the log format to log either with a message ID or without one on the client.with-msg-idno-msg-id/with-msg-id
diagnostics.brick-log-buf-sizeThe maximum number of unique log messages that can be suppressed until the timeout or buffer overflow, whichever occurs first on the bricks.50 and 20 (0 and 20 included)
diagnostics.client-log-buf-sizeThe maximum number of unique log messages that can be suppressed until the timeout or buffer overflow, whichever occurs first on the clients.50 and 20 (0 and 20 included)
diagnostics.brick-log-flush-timeoutThe length of time for which the log messages are buffered, before being flushed to the logging infrastructure (gluster or syslog files) on the bricks.12030 - 300 seconds (30 and 300 included)
diagnostics.client-log-flush-timeoutThe length of time for which the log messages are buffered, before being flushed to the logging infrastructure (gluster or syslog files) on the clients.12030 - 300 seconds (30 and 300 included)
| Performance | *features.trash | Enable/disable trash translator | off | on/off |
| | *performance.readdir-ahead | Enable/disable readdir-ahead translator in the volume | off | on/off |
| | *performance.read-ahead | Enable/disable read-ahead translator in the volume | off | on/off |
| | *performance.io-cache | Enable/disable io-cache translator in the volume | off | on/off |
| | performance.quick-read | To enable/disable quick-read translator in the volume. | on | off/on |
| | performance.md-cache | Enables and disables md-cache translator. | off | off/on |
| | performance.open-behind | Enables and disables open-behind translator. | on | off/on |
| | performance.nl-cache | Enables and disables nl-cache translator. | off | off/on |
| | performance.stat-prefetch | Enables and disables stat-prefetch translator. | on | off/on |
| | performance.client-io-threads | Enables and disables client-io-thread translator. | on | off/on |
| | performance.write-behind | Enables and disables write-behind translator. | on | off/on |
| | performance.write-behind-window-size | Size of the per-file write-behind buffer. | 1MB | Write-behind cache size |
| | performance.io-thread-count | The number of threads in the IO threads translator. | 16 | 1-64 |
| | performance.flush-behind | If this option is set ON, instructs the write-behind translator to perform flush in the background, by returning success (or any errors, if any of the previous writes failed) to the application even before the flush is sent to the backend filesystem. | On | On/Off |
| | performance.cache-max-file-size | Sets the maximum file size cached by the io-cache translator. Can use the normal size descriptors of KB, MB, GB, TB or PB (for example, 6GB). Maximum size uint64. | 2^64 - 1 bytes | size in bytes |
| | performance.cache-min-file-size | Sets the minimum file size cached by the io-cache translator. Values same as "max" above. | 0B | size in bytes |
| | performance.cache-refresh-timeout | The cached data for a file will be retained for 'cache-refresh-timeout' seconds, after which data re-validation is performed. | 1s | 0-61 |
| | performance.cache-size | Size of the read cache. | 32 MB | size in bytes |
| | performance.lazy-open | This option requires open-behind to be on. Perform an open in the backend only when a necessary FOP arrives (for example, a write on the file descriptor, or an unlink of the file). When this option is disabled, perform the backend open immediately after an unwinding open. | Yes | Yes/No |
| | performance.md-cache-timeout | The time period in seconds which controls when the metadata cache has to be refreshed. If the age of the cache is greater than this time period, it is refreshed. Every time the cache is refreshed, its age is reset to 0. | 1 | 0-600 seconds |
| | performance.nfs-strict-write-ordering | Specifies whether to prevent later writes from overtaking earlier writes for NFS, even if the writes do not relate to the same files or locations. | off | on/off |
| | performance.nfs.flush-behind | Specifies whether the write-behind translator performs flush operations in the background for NFS by returning (false) success to the application before flush file operations are sent to the backend file system. | on | on/off |
| | performance.nfs.strict-o-direct | Specifies whether to attempt to minimize the cache effects of I/O for a file on NFS. When this option is enabled and a file descriptor is opened using the O_DIRECT flag, write-back caching is disabled for writes that affect that file descriptor. When this option is disabled, O_DIRECT has no effect on caching. This option is ignored if performance.write-behind is disabled. | off | on/off |
| | performance.nfs.write-behind-trickling-writes | Enables and disables the trickling-write strategy for the write-behind translator for NFS clients. | on | off/on |
| | performance.nfs.write-behind-window-size | Specifies the size of the write-behind buffer for a single file or inode for NFS. | 1 MB | 512 KB - 1 GB |
| | performance.rda-cache-limit | The value specified for this option is the maximum size of cache consumed by the readdir-ahead translator. This value is global and the total memory consumption by readdir-ahead is capped by this value, irrespective of the number/size of directories cached. | 10MB | 0-1GB |
| | performance.rda-request-size | The value specified for this option will be the size of the buffer holding directory entries in the readdirp response. | 128KB | 4KB-128KB |
| | performance.resync-failed-syncs-after-fsync | If syncing cached writes that were issued before an fsync operation fails, this option configures whether to reattempt the failed sync operations. | off | on/off |
| | performance.strict-o-direct | Specifies whether to attempt to minimize the cache effects of I/O for a file. When this option is enabled and a file descriptor is opened using the O_DIRECT flag, write-back caching is disabled for writes that affect that file descriptor. When this option is disabled, O_DIRECT has no effect on caching. This option is ignored if performance.write-behind is disabled. | on | on/off |
| | performance.strict-write-ordering | Specifies whether to prevent later writes from overtaking earlier writes, even if the writes do not relate to the same files or locations. | on | on/off |
| | performance.use-anonymous-fd | This option requires open-behind to be on. For read operations, use an anonymous file descriptor when the original file descriptor is open-behind and not yet opened in the backend. | Yes | No/Yes |
| | performance.write-behind-trickling-writes | Enables and disables the trickling-write strategy for the write-behind translator for FUSE clients. | on | off/on |
| | performance.write-behind-window-size | Specifies the size of the write-behind buffer for a single file or inode. | 1MB | 512 KB - 1 GB |
| | features.read-only | Enables you to mount the entire volume as read-only for all the clients (including NFS clients) accessing it. | Off | On/Off |
| | features.quota-deem-statfs | When this option is set to on, it takes the quota limits into consideration while estimating the filesystem size. The limit will be treated as the total size instead of the actual size of the filesystem. | on | on/off |
| | features.shard | Enables or disables sharding on the volume. Affects files created after volume configuration. | disable | enable/disable |
| | features.shard-block-size | Specifies the maximum size of the file pieces when sharding is enabled. Affects files created after volume configuration. | 64MB | 4MB-4TB |
| | features.uss | Enables/disables User Serviceable Snapshots on the volume. | off | on/off |
| | geo-replication.indexing | Use this option to automatically sync the changes in the filesystem from Primary to Secondary. | Off | On/Off |
| | network.frame-timeout | The time frame after which the operation has to be declared as dead, if the server does not respond for a particular operation. | 1800 (30 mins) | 1800 secs |
| | network.ping-timeout | The time duration for which the client waits to check if the server is responsive. When a ping timeout happens, there is a network disconnect between the client and server. All resources held by the server on behalf of the client get cleaned up. When a reconnection happens, all resources will need to be re-acquired before the client can resume its operations on the server. Additionally, the locks will be acquired and the lock tables updated. This reconnect is a very expensive operation and should be avoided. | 42 Secs | 42 Secs |
| nfs | nfs.enable-ino32 | For 32-bit nfs clients or applications that do not support 64-bit inode numbers or large files, use this option from the CLI to make Gluster NFS return 32-bit inode numbers instead of 64-bit inode numbers. | Off | On/Off |
| | nfs.volume-access | Set the access type for the specified sub-volume. | read-write | read-write/read-only |
| | nfs.trusted-write | If there is an UNSTABLE write from the client, the STABLE flag will be returned to force the client to not send a COMMIT request. In some environments, combined with a replicated GlusterFS setup, this option can improve write performance. This flag allows users to trust Gluster replication logic to sync data to the disks and recover when required. COMMIT requests, if received, will be handled in a default manner by fsyncing. STABLE writes are still handled in a sync manner. | Off | On/Off |
| | nfs.trusted-sync | All writes and COMMIT requests are treated as async. This implies that no write requests are guaranteed to be on server disks when the write reply is received at the NFS client. Trusted sync includes trusted-write behavior. | Off | On/Off |
| | nfs.export-dir | This option can be used to export specified comma-separated subdirectories in the volume. The path must be an absolute path. Along with the path, an allowed list of IPs/hostnames can be associated with each subdirectory. If provided, connections will be allowed only from these IPs. Format: \<dir>[(hostspec[hostspec...])][,...]. Where hostspec can be an IP address, hostname or an IP range in CIDR notation. Note: Care must be taken while configuring this option, as invalid entries and/or unreachable DNS servers can introduce unwanted delay in all the mount calls. | No sub directory exported. | Absolute path with allowed list of IP/hostname |
| | nfs.export-volumes | Enable/Disable exporting entire volumes; if used in conjunction with nfs3.export-dir, can allow setting up only subdirectories as exports. | On | On/Off |
| | nfs.rpc-auth-unix | Enable/Disable the AUTH_UNIX authentication type. This option is enabled by default for better interoperability. However, you can disable it if required. | On | On/Off |
| | nfs.rpc-auth-null | Enable/Disable the AUTH_NULL authentication type. It is not recommended to change the default value for this option. | On | On/Off |
| | nfs.rpc-auth-allow \<IP-Addresses> | Allow a comma separated list of addresses and/or hostnames to connect to the server. By default, all clients are disallowed. This allows you to define a general rule for all exported volumes. | Reject All | IP address or Host name |
| | nfs.rpc-auth-reject \<IP-Addresses> | Reject a comma separated list of addresses and/or hostnames from connecting to the server. By default, all connections are disallowed. This allows you to define a general rule for all exported volumes. | Reject All | IP address or Host name |
| | nfs.ports-insecure | Allow client connections from unprivileged ports. By default only privileged ports are allowed. This is a global setting in case insecure ports are to be enabled for all exports using a single option. | Off | On/Off |
| | nfs.addr-namelookup | Turn off name lookup for incoming client connections using this option. In some setups, the name server can take too long to reply to DNS queries, resulting in timeouts of mount requests. Use this option to turn off name lookups during address authentication. Note that turning this off will prevent you from using hostnames in rpc-auth.addr.* filters. | On | On/Off |
| | nfs.register-with-portmap | For systems that need to run multiple NFS servers, you need to prevent more than one from registering with the portmap service. Use this option to turn off portmap registration for Gluster NFS. | On | On/Off |
| | nfs.port \<PORT-NUMBER> | Use this option on systems that need Gluster NFS to be associated with a non-default port number. | NA | 38465-38467 |
| | nfs.disable | Turn off the volume being exported by NFS. | Off | On/Off |
| Server | server.allow-insecure | Allow client connections from unprivileged ports. By default only privileged ports are allowed. This is a global setting in case insecure ports are to be enabled for all exports using a single option. | On | On/Off |
| | server.statedump-path | Location of the state dump file. | tmp directory of the brick | New directory path |
| | server.allow-insecure | Allows FUSE-based client connections from unprivileged ports. By default, this is enabled, meaning that ports can accept and reject messages from insecure ports. When disabled, only privileged ports are allowed. | on | on/off |
| | server.anongid | Value of the GID used for the anonymous user when root-squash is enabled. When root-squash is enabled, all the requests received from the root GID (that is 0) are changed to have the GID of the anonymous user. | 65534 (this GID is also known as nfsnobody) | 0 - 4294967295 |
| | server.anonuid | Value of the UID used for the anonymous user when root-squash is enabled. When root-squash is enabled, all the requests received from the root UID (that is 0) are changed to have the UID of the anonymous user. | 65534 (this UID is also known as nfsnobody) | 0 - 4294967295 |
| | server.event-threads | Specifies the number of event threads to execute in parallel. Larger values would help process responses faster, depending on available processing power. | 2 | 1-1024 |
| | server.gid-timeout | The time period in seconds which controls when cached groups have to expire. This is the cache that contains the groups (GIDs) a specified user (UID) belongs to. This option is used only when server.manage-gids is enabled. | 2 | 0-4294967295 seconds |
| | server.manage-gids | Resolve groups on the server side. By enabling this option, the groups (GIDs) a user (UID) belongs to get resolved on the server, instead of using the groups that were sent in the RPC call by the client. This option makes it possible to apply permission checks for users that belong to bigger group lists than the protocol supports (approximately 93). | off | on/off |
| | server.root-squash | Prevents root users from having root privileges, and instead assigns them the privileges of nfsnobody. This squashes the power of the root users, preventing unauthorized modification of files on the Red Hat Gluster Storage servers. This option is used only for the glusterFS NFS protocol. | off | on/off |
| | server.statedump-path | Specifies the directory in which the statedump files must be stored. | path to directory | /var/run/gluster (for a default installation) |
| Storage | storage.health-check-interval | Number of seconds between health-checks done on the filesystem that is used for the brick(s). Defaults to 30 seconds; set to 0 to disable. | 30 seconds | 0-4294967295 seconds |
| | storage.linux-io_uring | Enable/Disable io_uring based I/O at the posix xlator on the bricks. | Off | On/Off |
| | storage.fips-mode-rchecksum | If enabled, posix_rchecksum uses the FIPS compliant SHA256 checksum, else it uses MD5. | on | on/off |
| | storage.create-mask | Maximum set (upper limit) of permission for the files that will be created. | 0777 | 0000 - 0777 |
| | storage.create-directory-mask | Maximum set (upper limit) of permission for the directories that will be created. | 0777 | 0000 - 0777 |
| | storage.force-create-mode | Minimum set (lower limit) of permission for the files that will be created. | 0000 | 0000 - 0777 |
| | storage.force-create-directory | Minimum set (lower limit) of permission for the directories that will be created. | 0000 | 0000 - 0777 |
| | storage.health-check-interval | Sets the time interval in seconds for a filesystem health check. You can set it to 0 to disable. | 30 seconds | 0-4294967295 seconds |
| | storage.reserve | Reserves storage space at the brick. This option accepts a size in MB or as a percentage. If storage.reserve was configured with a size in MB earlier and a percentage is given later, the same option can be used; the most recently set value takes effect, so the percentage value overwrites the older MB value. | 1 (1% of the brick size) | 0-100 |

Note

We have found that a few performance translators (the options marked with * in the table above) have been causing more performance regressions than improvements. These translators should be turned off for volumes.
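For reference, a minimal sketch of how any tunable from this table is applied and inspected with the gluster CLI (the volume name testvol and the chosen option are only examples):

gluster volume set testvol performance.cache-size 256MB
gluster volume get testvol performance.cache-size
gluster volume reset testvol performance.cache-size

gluster volume reset reverts the option back to its default value.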

diff --git a/Administrator-Guide/arbiter-volumes-and-quorum/index.html b/Administrator-Guide/arbiter-volumes-and-quorum/index.html

Arbiter volumes and quorum options in gluster

+

The arbiter volume is a special subset of replica volumes that is aimed at +preventing split-brains and providing the same consistency guarantees as a normal +replica 3 volume without consuming 3x space.

+ +

Arbiter configuration

+

The syntax for creating the volume is:

+
gluster volume create <VOLNAME>  replica 2 arbiter 1 <NEW-BRICK> ...
+
+

Note: The earlier syntax used to be replica 3 arbiter 1, but that was leading to confusion among users about the total number of data bricks. For the sake of backward compatibility, the old syntax also works. In either case, the implied meaning is that there are 2 data bricks and 1 arbiter brick in an nx(2+1) arbiter volume.

+

For example:

+
gluster volume create testvol replica 2 arbiter 1  server{1..6}:/bricks/brick
+
+
volume create: testvol: success: please start the volume to access data
+
+

This means that for every 3 bricks listed, 1 of them is an arbiter. We have created 6 bricks. With a replica count of three, each set of 3 bricks in the series forms a replica subvolume. Since we have two such sets, this creates a distributed volume made up of two replica subvolumes.

+

Each replica subvolume is defined to have 1 arbiter out of the 3 bricks. The +arbiter bricks are taken from the end of each replica subvolume.

+
gluster volume info
+
+
Volume Name: testvol
+Type: Distributed-Replicate
+Volume ID: ae6c4162-38c2-4368-ae5d-6bad141a4119
+Status: Created
+Number of Bricks: 2 x (2 + 1) = 6
+Transport-type: tcp
+Bricks:
+Brick1: server1:/bricks/brick
+Brick2: server2:/bricks/brick
+Brick3: server3:/bricks/brick (arbiter)
+Brick4: server4:/bricks/brick
+Brick5: server5:/bricks/brick
+Brick6: server6:/bricks/brick (arbiter)
+Options Reconfigured  :
+transport.address-family: inet
+performance.readdir-ahead: on
+
+

The arbiter brick will store only the file/directory names (i.e. the tree structure) +and extended attributes (metadata) but not any data. i.e. the file size +(as shown by ls -l) will be zero bytes. It will also store other gluster +metadata like the .glusterfs folder and its contents.

+

Note: Enabling the arbiter feature automatically configures +client-quorum to 'auto'. This setting is not to be changed.

+

Arbiter brick(s) sizing

+

Since the arbiter brick does not store file data, its disk usage will be considerably smaller than for the other bricks of the replica. The sizing of the brick will depend on how many files you plan to store in the volume. A good estimate will be 4KB times the number of files in the replica. Note that the estimate also depends on the inode space allotted by the underlying filesystem for a given disk size.

+

The maxpct value in XFS for volumes of size 1TB to 50TB is only 5%. If you want to store, say, 300 million files, 4KB x 300M gives us 1.2TB. 5% of this is around 60GB. Assuming the recommended inode size of 512 bytes, that gives us the ability to store only 60GB/512 ~= 120 million files. So it is better to choose a higher maxpct value (say 25%) while formatting an XFS disk of size greater than 1TB. Refer to the man page of mkfs.xfs for details.
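As a rough, illustrative check of the arithmetic above (the 4KB-per-file figure and the 300 million file count are assumptions, not fixed values), the expected arbiter brick usage can be estimated with simple shell arithmetic:

# Estimated arbiter brick usage: ~4KB of metadata per file, 300 million files
FILES=300000000
echo "$((FILES * 4 / 1024 / 1024)) GiB"   # prints ~1144 GiB, i.e. roughly 1.2TB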

+

Why Arbiter?

+

Split-brains in replica volumes

+

When a file is in split-brain, there is an inconsistency in either the data or the metadata (permissions, uid/gid, extended attributes etc.) of the file amongst the bricks of a replica, and we do not have enough information to authoritatively pick a copy as being pristine and heal it to the bad copies, despite all bricks being up and online. For directories, there is also an entry split-brain, where a file inside them has a different gfid/file-type (say, one is a file and another is a directory of the same name) across the bricks of a replica.

+

This document +describes how to resolve files that are in split-brain using gluster cli or the +mount point. Almost always, split-brains occur due to network disconnects (where +a client temporarily loses connection to the bricks) and very rarely due to +the gluster brick processes going down or returning an error.

+

Server-quorum and some pitfalls

+

This document +provides a detailed description of this feature. +The volume options for server-quorum are:

+
+

Option: cluster.server-quorum-ratio
+Value Description: 0 to 100

+

Option: cluster.server-quorum-type
+Value Description: none | server
+If set to server, this option enables the specified volume to participate in the server-side quorum. +If set to none, that volume alone is not considered for volume checks.
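For illustration only (the volume name testvol is an assumption; the quorum ratio is cluster-wide and is therefore set on all), these options would typically be configured as:

gluster volume set all cluster.server-quorum-ratio 51%
gluster volume set testvol cluster.server-quorum-type server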

+
+

The cluster.server-quorum-ratio is a percentage figure and is cluster-wide, i.e. you cannot have different ratios for different volumes in the same trusted pool.

+

For a two-node trusted storage pool, it is important to set this value +greater than 50%, so that two nodes separated from each other do not believe +they have quorum simultaneously. For a two-node plain replica volume, this would +mean both nodes need to be up and running. So there is no notion of HA/failover.

+

There are users who create a replica 2 volume from 2 nodes and peer-probe +a 'dummy' node without bricks and enable server quorum with a ratio of 51%. +This does not prevent files from getting into split-brain. For example, if B1 +and B2 are the bricks/nodes of the replica and B3 is the dummy node, we can +still end up in split-brain like so:

+
  1. B1 goes down, B2 and B3 are up. Server-quorum is still met. The file is modified by the client.
  2. B2 goes down, B1 comes back up. Server-quorum is met. The same file is modified by the client.
  3. We now have different contents for the file on B1 and B2 ==> split-brain.

In the author's opinion, server-quorum is useful if you want to avoid split-brains in the volumes' configuration across the nodes, not in the I/O path. Unlike with client-quorum, where the volume becomes read-only when quorum is lost, loss of server-quorum on a particular node makes glusterd kill the brick processes on that node (for the participating volumes), making even reads impossible.

+

Client Quorum

+

Client-quorum is a feature implemented in AFR to prevent split-brains in the I/O +path for replicate/distributed-replicate volumes. By default, if the client-quorum +is not met for a particular replica subvol, it becomes unavailable. The other subvols +(in a dist-rep volume) will still have R/W access.

+

The following volume set options are used to configure it:

+
+

Option: cluster.quorum-type
+Default Value: none
+Value Description: none|auto|fixed
+If set to "fixed", this option allows writes to a file only if the number of +active bricks in that replica set (to which the file belongs) is greater +than or equal to the count specified in the 'quorum-count' option. +If set to "auto", this option allows write to the file only if number of +bricks that are up >= ceil (of the total number of bricks that constitute that replica/2). +If the number of replicas is even, then there is a further check: +If the number of up bricks is exactly equal to n/2, then the first brick must +be one of the bricks that are up. If it is more than n/2 then it is not +necessary that the first brick is one of the up bricks.

+

Option: cluster.quorum-count
+Value Description:
+The number of bricks that must be active in a replica-set to allow writes. +This option is used in conjunction with cluster.quorum-type =fixed option +to specify the number of bricks to be active to participate in quorum. +If the quorum-type is auto then this option has no significance.
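A minimal sketch of configuring client-quorum (the volume name testvol is an assumption):

# Require a fixed number of bricks (here 2) to be up for writes
gluster volume set testvol cluster.quorum-type fixed
gluster volume set testvol cluster.quorum-count 2

# Or let AFR derive the quorum automatically
gluster volume set testvol cluster.quorum-type auto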

+
+

Earlier, when quorum was not met, the replica subvolume turned read-only. But +since glusterfs-3.13 and upwards, the subvolume becomes unavailable, i.e. all +the file operations fail with ENOTCONN error instead of becoming EROFS. +This means the cluster.quorum-reads volume option is also not supported.

+

Replica 2 and Replica 3 volumes

+

From the above descriptions, it is clear that client-quorum cannot really be applied to a replica 2 volume without costing HA. If the quorum-type is set to auto, then by the description given earlier, the first brick must always be up, irrespective of the status of the second brick. In other words, if only the second brick is up, the subvolume returns ENOTCONN, i.e. no HA. If quorum-type is set to fixed, then the quorum-count has to be two to prevent split-brains (otherwise a write can succeed on brick1 and another on brick2 => split-brain). So for all practical purposes, if you want high availability in a replica 2 volume, it is recommended not to enable client-quorum.

+

In a replica 3 volume, client-quorum is enabled by default and set to 'auto'. +This means 2 bricks need to be up for the write to succeed. Here is how this +configuration prevents files from ending up in split-brain:

+

Say B1, B2 and B3 are the bricks:

+
  1. B3 is down, quorum is met, the write happens on B1 and B2.
  2. B3 comes up, B2 is down, quorum is again met, the write happens on B1 and B3.
  3. B2 comes up, B1 goes down, quorum is met. Now when a write is issued, AFR sees that B2's and B3's pending xattrs blame each other and therefore the write is not allowed and fails with ENOTCONN.

How Arbiter works

+

There are 2 components to the arbiter volume. One is the arbiter xlator that is +loaded in the brick process of every 3rd (i.e. the arbiter) brick. The other is the +arbitration logic itself that is present in AFR (the replicate xlator) loaded +on the clients.

+

The former acts as a sort of 'filter' translator for the FOPs, i.e. it allows entry operations to hit POSIX, blocks certain inode operations like read (unwinds the call with ENOTCONN) and unwinds other inode operations like write, truncate etc. with success, without winding them down to POSIX.

+

The latter i.e. the arbitration logic present in AFR takes full file locks +when writing to a file, just like in normal replica volumes. The behavior of +arbiter volumes in allowing/failing write FOPS in conjunction with +client-quorum can be summarized in the below steps:

+
  • If all 3 bricks are up (happy case), then there is no issue and the FOPs are allowed.
  • If 2 bricks are up and one of them is the arbiter (i.e. the 3rd brick) and it blames the other up brick for a given file, then all write FOPs will fail with ENOTCONN. This is because, in this scenario, the only true copy is on the brick that is down. Hence we cannot allow writes until that brick is also up. If the arbiter doesn't blame the other brick, FOPs will be allowed to proceed. 'Blaming' here is w.r.t. the values of the AFR changelog extended attributes.
  • If 2 bricks are up and the arbiter is down, then FOPs will be allowed. When the arbiter comes up, the entry/metadata heals to it happen. Of course, data heals are not needed.
  • If only one brick is up, then client-quorum is not met and the volume returns ENOTCONN.
  • In all cases, if there is only one source before the FOP is initiated (even if all bricks are up) and the FOP fails on that source, the application will receive ENOTCONN. For example, assume that a write failed on B2 and B3, i.e. B1 is the only source. Now if, for some reason, a second write failed on B1 (before there was a chance for self-heal to complete despite all bricks being up), the application would receive failure (ENOTCONN) for that write.

The bricks being up or down described above does not necessarily mean the brick +process is offline. It can also mean the mount lost the connection to the brick +due to network disconnects etc.

diff --git a/Administrator-Guide/formatting-and-mounting-bricks/index.html b/Administrator-Guide/formatting-and-mounting-bricks/index.html

Formatting and Mounting Bricks

+ +

Formatting and Mounting Bricks

+

Creating a Thinly Provisioned Logical Volume

+

To create a thinly provisioned logical volume, proceed with the following steps:

+
    +
  1. +

    Create a physical volume(PV) by using the pvcreate command. + For example:

    +
    pvcreate --dataalignment 128K /dev/sdb
    +
    +

    Here, /dev/sdb is a storage device. +Use the correct dataalignment option based on your device.

    +
    +

    Note: +The device name and the alignment value will vary based on the device you are using.

    +
    +
  2. +
  3. +

    Create a Volume Group (VG) from the PV using the vgcreate command: + For example:

    +
    vgcreate --physicalextentsize 128K gfs_vg /dev/sdb
    +
    +

    It is recommended that only one VG must be created from one storage device.

    +
  4. +
  5. +

    Create a thin-pool using the following commands:

    +
      +
    1. +

      Create an LV to serve as the metadata device using the following command:

      +
      lvcreate -L metadev_sz --name metadata_device_name VOLGROUP
      +
      +

      For example:

      +
      lvcreate -L 16776960K --name gfs_pool_meta gfs_vg
      +
      +
    2. +
    3. +

      Create an LV to serve as the data device using the following command:

      +
      lvcreate -L datadev_sz --name thin_pool VOLGROUP
      +
      +

      For example:

      +
      lvcreate -L 536870400K --name gfs_pool gfs_vg
      +
      +
    4. +
    5. +

      Create a thin pool from the data LV and the metadata LV using the following command:

      +
      lvconvert --chunksize STRIPE_WIDTH --thinpool VOLGROUP/thin_pool --poolmetadata VOLGROUP/metadata_device_name
      +
      +

      For example:

      +
      lvconvert --chunksize 1280K --thinpool gfs_vg/gfs_pool --poolmetadata gfs_vg/gfs_pool_meta
      +
      +
      +

      Note: By default, the newly provisioned chunks in a thin pool are zeroed to prevent data leaking between different block devices. If desired, this zeroing can be turned off with lvchange, as shown below:

      +
      +
      lvchange --zero n VOLGROUP/thin_pool
      +
      +

      For example:

      +
      lvchange --zero n gfs_vg/gfs_pool
      +
      +
    6. +
    +
  6. +
  7. +

    Create a thinly provisioned volume from the previously created pool using the lvcreate command:

    +

    For example:

    +
    lvcreate -V 1G -T gfs_vg/gfs_pool -n gfs_lv
    +
    +

    It is recommended that only one LV should be created in a thin pool.

    +

    Format bricks using the supported XFS configuration, mount the bricks, and verify the bricks are mounted correctly.

    +

    Run mkfs.xfs -f -i size=512 -n size=8192 -d su=128k,sw=10 DEVICE to format the bricks to the supported XFS file system format. Here, DEVICE is the thin LV (here /dev/gfs_vg/gfs_lv). The inode size is set to 512 bytes to accommodate the extended attributes used by GlusterFS.
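    For example, for the thin LV created earlier (the su/sw values are illustrative and depend on your underlying RAID geometry):

    mkfs.xfs -f -i size=512 -n size=8192 -d su=128k,sw=10 /dev/gfs_vg/gfs_lv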

    +

    Run mkdir /mountpoint to create a directory to link the brick to.

    +

    Add an entry in /etc/fstab:

    +
    /dev/gfs_vg/gfs_lv    /mountpoint  xfs rw,inode64,noatime,nouuid      1 2
    +
    +

    Run mount /mountpoint to mount the brick.

    +

    Run the df -h command to verify the brick is successfully mounted:

    +
    # df -h
    +/dev/gfs_vg/gfs_lv   16G  1.2G   15G   7% /exp1
    +
    +
diff --git a/Administrator-Guide/index.html b/Administrator-Guide/index.html
diff --git a/Administrator-Guide/io_uring/index.html b/Administrator-Guide/io_uring/index.html

io_uring support in gluster

+

io_uring is an asynchronous I/O interface similar to linux-aio, but aims to be more performant. +Refer https://kernel.dk/io_uring.pdf and https://kernel-recipes.org/en/2019/talks/faster-io-through-io_uring for more details.

+

Incorporating io_uring in various layers of gluster is an ongoing activity but beginning with glusterfs-9.0, support has been added to the posix translator via the storage.linux-io_uring volume option. When this option is enabled, the posix translator in the glusterfs brick process (at the server side) will use io_uring calls for reads, writes and fsyncs as opposed to the normal pread/pwrite based syscalls.

+

Example:

+
# gluster volume set testvol storage.linux-io_uring on
+volume set: success
+
+# gluster volume set testvol storage.linux-io_uring off
+volume set: success
+
+

This option can be enabled/disabled only when the volume is not running, i.e. you can toggle the option while the volume is in the Created or Stopped state, as indicated by gluster volume status $VOLNAME.
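For example, a sketch of toggling the option on a volume that is currently running (reusing the example volume testvol):

# gluster volume stop testvol
# gluster volume set testvol storage.linux-io_uring on
# gluster volume start testvol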

diff --git a/Administrator-Guide/overview/index.html b/Administrator-Guide/overview/index.html

Overview

+ +

Overview

+

The Administration guide covers day-to-day management tasks as well as advanced configuration methods for your Gluster setup.

+

You can manage your Gluster cluster using the Gluster CLI

+

See the glossary for an explanation of the various terms used in this document.

diff --git a/Administrator-Guide/setting-up-storage/index.html b/Administrator-Guide/setting-up-storage/index.html

Setting Up Storage

+

A volume is a logical collection of bricks where each brick is an export directory on a server in the trusted storage pool. +Before creating a volume, you need to set up the bricks that will form the volume.

diff --git a/CLI-Reference/cli-main/index.html b/CLI-Reference/cli-main/index.html

Overview

+ +

Gluster Command Line Interface

+

Overview

+

Use the Gluster CLI to set up and manage your Gluster cluster from a terminal. You can run the Gluster CLI on any Gluster server, either by invoking the commands directly or by running the Gluster CLI in interactive mode. You can also use the gluster command remotely using SSH.
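For instance, a quick sketch of running a single command over SSH (the host name server1 is a placeholder):

ssh root@server1 gluster peer status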

+

The gluster CLI syntax is gluster <command>.

+

To run a command directly:

+
gluster <command>
+
+

For example, to view the status of all peers:

+
gluster peer status
+
+

To run a command in interactive mode, start a gluster shell by typing:

+
gluster
+
+

This will open a gluster command prompt. You can now run commands at the prompt.

+
gluster> <command>
+
+

For example, to view the status of all peers,

+
gluster> peer status
+
+

Peer Commands

+

The peer commands are used to manage the Trusted Server Pool (TSP).

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Command | Syntax | Description |
| --- | --- | --- |
| peer probe | peer probe server | Add server to the TSP |
| peer detach | peer detach server | Remove server from the TSP |
| peer status | peer status | Display the status of all nodes in the TSP |
| pool list | pool list | List all nodes in the TSP |
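For example, a short sketch of building up a pool (the host names are placeholders):

gluster peer probe server2
gluster peer probe server3
gluster peer status
gluster pool list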
+

Volume Commands

+

The volume commands are used to setup and manage Gluster volumes.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Command | Syntax | Description |
| --- | --- | --- |
| volume create | volume create volname [options] bricks | Create a volume called volname using the specified bricks with the configuration specified by options |
| volume start | volume start volname [force] | Start volume volname |
| volume stop | volume stop volname | Stop volume volname |
| volume info | volume info [volname] | Display volume info for volname if provided, else for all volumes on the TSP |
| volume status | volume status [volname] | Display volume status for volname if provided, else for all volumes on the TSP |
| volume list | volume list | List all volumes in the TSP |
| volume set | volume set volname option value | Set option to value for volname |
| volume get | volume get volname \<option\|all> | Display the value of option (if specified) for volname, or all options otherwise |
| volume add-brick | volume add-brick volname brick-1 ... brick-n | Expand volname to include the bricks brick-1 to brick-n |
| volume remove-brick | volume remove-brick volname brick-1 ... brick-n \<start\|stop\|status\|commit\|force> | Shrink volname by removing the bricks brick-1 to brick-n. start will trigger a rebalance to migrate data from the removed bricks. stop will stop an ongoing remove-brick operation. force will remove the bricks immediately and any data on them will no longer be accessible from Gluster clients. |
| volume replace-brick | volume replace-brick volname old-brick new-brick | Replace old-brick of volname with new-brick |
| volume delete | volume delete volname | Delete volname |
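A brief sketch tying a few of these commands together (the volume name, brick paths and replica count are only examples):

gluster volume create testvol replica 3 server{1..3}:/bricks/brick1
gluster volume start testvol
gluster volume set testvol performance.io-thread-count 32
gluster volume info testvol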
+

For additional detail of all the available CLI commands, please refer to man gluster output.

diff --git a/CNAME b/CNAME
+docs.gluster.org
diff --git a/Contributors-Guide/Adding-your-blog/index.html b/Contributors-Guide/Adding-your-blog/index.html

Adding your gluster blog

+ +

Adding your blog

+

As a developer/user, you have blogged about gluster and want to share the post with the Gluster community.

+

OK, you can do that by editing planet-gluster feeds on Github.

+

Please find instructions mentioned in the file and send a pull request.

+

Once approved, all your gluster-related posts will appear on the planet.gluster.org website.

diff --git a/Contributors-Guide/Bug-Reporting-Guidelines/index.html b/Contributors-Guide/Bug-Reporting-Guidelines/index.html

Bug reporting guidelines

+ +

Before filing an issue

+

If you are hitting any issues, the following preliminary checks are useful:

+
    +
  • Is SELinux enabled? (you can use getenforce to check)
  • +
  • Are iptables rules blocking any data traffic? (iptables -L can + help check)
  • +
  • Are all the nodes reachable from each other? [ Network problem ]
  • +
  • +

    Please search issues + to see if the bug has already been reported

    +
      +
    • If an issue has been already filed for a particular release and you found the issue in another release, add a comment in issue.
    • +
    +
  • +
+

Anyone can search in github issues, you don't need an account. Searching +requires some effort, but helps avoid duplicates, and you may find that +your problem has already been solved.

+

Reporting An Issue

+
    +
  • You should have an account with github.com
  • +
  • Here is the link to file an issue: + Github
  • +
+

Note: Please go through all the sections below to understand what information we need to put in a bug report. This will help the developers root-cause and fix the issue.

+

Required Information

+

You should gather the information below before creating the bug report.

+

Package Information

+
    +
  • Location from which the packages are used
  • +
  • Package Info - version of glusterfs package installed
  • +
+

Cluster Information

+
    +
  • Number of nodes in the cluster
  • +
  • +

    Hostnames and IPs of the gluster Node [if it is not a security + issue]

    +
      +
    • Hostname / IP will help developers in understanding & correlating with the logs
    • +
    +
  • +
  • +

    Output of gluster peer status

    +
  • +
  • +

    Node IP, from which the "x" operation is done

    +
      +
    • "x" here means any operation that causes the issue
    • +
    +
  • +
+

Volume Information

+
    +
  • Number of volumes
  • +
  • Volume Names
  • +
  • Volume on which the particular issue is seen [ if applicable ]
  • +
  • Type of volumes
  • +
  • Volume options if available
  • +
  • Output of gluster volume info
  • +
  • Output of gluster volume status
  • +
  • Get the statedump of the volume with the problem gluster volume statedump <vol-name>
  • +
+

This dumps a statedump per brick process in /var/run/gluster.

+

NOTE: Collect statedumps from one gluster Node in a directory.

+

Repeat this on all nodes containing the bricks of the volume. All the collected directories can then be archived, compressed, and attached to the bug.
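For example, the volume details and statedump requested above can be gathered as follows (the volume name is a placeholder):

gluster volume info <vol-name>
gluster volume status <vol-name>
gluster volume statedump <vol-name>
ls /var/run/gluster/        # the statedump files are created here on each brick node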

+

Brick Information

+
    +
  • +

    xfs options when a brick partition was done

    +
      +
    • This could be obtained with this command: xfs_info /dev/mapper/vg1-brick
    • +
    +
  • +
  • +

    Extended attributes on the bricks

    +
      +
    • This could be obtained with this command: getfattr -d -m. -ehex /rhs/brick1/b1
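Both brick-level checks from this section, shown together (the device and brick paths are the examples used above):

xfs_info /dev/mapper/vg1-brick
getfattr -d -m. -ehex /rhs/brick1/b1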
    • +
    +
  • +
+

Client Information

+
    +
  • OS Type ( Ubuntu, Fedora, RHEL )
  • +
  • OS Version: In case of Linux distro get the following :
  • +
+
uname -r
+cat /etc/issue
+
+
    +
  • Fuse or NFS Mount point on the client with output of mount commands
  • +
  • Output of df -Th command
  • +
+

Tool Information

+
    +
  • If any tools are used for testing, provide the info/version about it
  • +
  • if any IO is simulated using a script, provide the script
  • +
+

Logs Information

+
    +
  • +

    You can check logs for issues/warnings/errors.

    +
      +
    • Self-heal logs
    • +
    • Rebalance logs
    • +
    • Glusterd logs
    • +
    • Brick logs
    • +
    • NFS logs (if applicable)
    • +
    • Samba logs (if applicable)
    • +
    • Client mount log
    • +
    +
  • +
  • +

    Add the entire logs as an attachment if they are too large to paste as a comment.

    +
  • +
+

SOS report for CentOS/Fedora

+
    +
  • Get the sosreport from the involved gluster Node and Client [ in + case of CentOS /Fedora ]
  • +
  • Add a meaningful name/IP to the sosreport, by renaming/adding + hostname/ip to the sosreport name
  • +
diff --git a/Contributors-Guide/Bug-Triage/index.html b/Contributors-Guide/Bug-Triage/index.html

Issues Triage Guidelines

+
    +
  • +

    Triaging of issues is an important task; when done correctly, it can + reduce the time between reporting an issue and the availability of a + fix enormously.

    +
  • +
  • +

    Triager should focus on new issues, and try to define the problem + easily understandable and as accurate as possible. The goal of the + triagers is to reduce the time that developers need to solve the bug + report.

    +
  • +
  • +

    A triager is like an assistant that helps with the information + gathering and possibly the debugging of a new bug report. Because a + triager helps preparing a bug before a developer gets involved, it + can be a very nice role for new community members that are + interested in technical aspects of the software.

    +
  • +
  • +

    Triagers will stumble upon many different kind of issues, ranging + from reports about spelling mistakes, or unclear log messages to + memory leaks causing crashes or performance issues in environments + with several hundred storage servers.

    +
  • +
+

Nobody expects that triagers can prepare all bug reports. Therefore most +developers will be able to assist the triagers, answer questions and +suggest approaches to debug and data to gather. Over time, triagers get +more experienced and will rely less on developers.

+

Issue triage can be summarized as below points:

+
    +
  • Is the issue a bug? an enhancement request? or a question? Assign the relevant label.
  • +
  • Is there enough information in the issue description?
  • +
  • Is it a duplicate issue?
  • +
  • Is it assigned to correct component of GlusterFS?
  • +
  • Is the bug summary is correct?
  • +
  • Assigning issue or Adding people's github handle in the comment, so they get notified.
  • +
+

The detailed discussion about the above points are below.

+

Is there enough information?

+

It's hard to generalize what makes a good report. For "average" reporters it is definitely helpful to have good steps to reproduce, the GlusterFS software version, and information about the test/production environment, such as the Linux/GNU distribution.

+

If the reporter is a developer, steps to reproduce can sometimes be +omitted as context is obvious. However, this can create a problem for +contributors that need to find their way, hence it is strongly advised +to list the steps to reproduce an issue.

+

Other tips:

+
    +
  • +

    There should be only one issue per report. Try not to mix related or + similar looking bugs per report.

    +
  • +
  • +

    It should be possible to call the described problem fixed at some + point. "Improve the documentation" or "It runs slow" could never be + called fixed, while "Documentation should cover the topic Embedding" + or "The page at http://en.wikipedia.org/wiki/Example should load + in less than five seconds" would have a criterion. A good summary of + the bug will also help others in finding existing bugs and prevent + filing of duplicates.

    +
  • +
  • +

    If the bug is a graphical problem, you may want to ask for a + screenshot to attach to the bug report. Make sure to ask that the + screenshot should not contain any confidential information.

    +
  • +
+

Is it a duplicate?

+

If you think that you have found a duplicate but you are not totally +sure, just add a comment like "This issue looks related to issue #NNN" (and +replace NNN by issue-id) so somebody else can take a look and help judging.

+

Is it assigned with correct label?

+

Go through the labels and assign the appropriate label

+

Are the fields correct?

+

Description

+

Sometimes the description does not summarize the bug itself well. You may +want to update the bug summary to make the report distinguishable. A +good title may contain:

+
    +
  • A brief explanation of the root cause (if it was found)
  • +
  • Some of the symptoms people are experiencing
  • +
+

Assigning issue or Adding people's github handle in the comment

+

Normally, developers and potential assignees of an area are already watching all the issues by default, but sometimes reports describe general issues. Only if you know developers who work in the area covered by the issue, and if you know that these developers accept getting CCed or assigned to certain reports, should you mention them in a comment or even assign the bug report to them.

+

To get an idea who works in which area, check to know component owners, +you can check the "MAINTAINERS" file in root of glusterfs code directory +(see Simplified dev workflow)

+

Bugs present in multiple Versions

+

During triaging you might come across a particular bug which is present across multiple versions of GlusterFS. Add that information in a comment.

diff --git a/Contributors-Guide/GlusterFS-Release-process/index.html b/Contributors-Guide/GlusterFS-Release-process/index.html

Release Process for GlusterFS

+

The GlusterFS release process aims to provide regular, stable releases, with the ability to also ship new features quickly, while also attempting to reduce the complexity for release maintainers.

+

GlusterFS releases

+

GlusterFS major releases happen once every 4-6 months. Check the Release Schedule for more information on the schedule for major releases. Minor releases happen every month for the corresponding branch of a major release. Each major release is supported until the N+2 version is made available.

+

Major releases don't guarantee complete backwards compatibility with the previous major release.

+

Minor releases will have guaranteed backwards compatibility with earlier minor releases of the same branch.

+

GlusterFS major release

+

Each GlusterFS major release has a 4-6 month release window, in which changes get merged. This window is split into two phases.

+
    +
  1. An Open phase, where all changes get merged
  2. A Stability phase, where only changes that stabilize the release get merged.
+

The first 2-4 months of a release window will be the Open phase, and the last month will be the stability phase.

+

The release engineer (or team doing the release) is responsible for messaging.

+

Open phase

+

Any changes are accepted during this phase. New features that are introduced in this phase need to be capable of being selectively built. All changes in the master branch are automatically included in the next release.

+

All changes will be accepted during the Open phase. The changes have a few requirements,

+
    +
  • a change fixing a bug SHOULD have public test case
  • +
  • a change introducing a new feature MUST have a disable switch that can disable the feature during a build
  • +
+

Stability phase

+

This phase is used to stabilize any new features introduced in the open phase, or general bug fixes for already existing features.

+

A new release-<version> branch is created at the beginning of this phase. All changes need to be sent to the master branch before getting backported to the new release branch.

+

No new features will be merged in this phase. At the end of this phase, any new feature introduced that hasn't been declared stable will be disabled, if possible removed, to prevent confusion and set clear expectations towards users and developers.

+

Patches accepted in the Stability phase have the following requirements:

+
    +
  • a change MUST fix an issue that users have reported or are very likely to hit
  • +
  • each change SHOULD have a public test-case (.t or DiSTAF)
  • +
  • a change MUST NOT add a new FOP
  • +
  • a change MUST NOT add a new xlator
  • +
  • a change SHOULD NOT add a new volume option, unless a public discussion was kept and several maintainers agree that this is the only right approach
  • +
  • a change MAY add new values for existing volume options, these need to be documented in the release notes and be marked as a 'minor feature enhancement' or similar
  • +
  • it is NOT RECOMMENDED to modify the contents of existing log messages, automation and log parsers can depend on the phrasing
  • +
  • a change SHOULD NOT have more than approx. 100 lines changed, additional public discussion and agreement among maintainers is required to get big changes approved
  • +
  • a change SHOULD NOT modify existing structures or parameters that get sent over the network, unless a public discussion was kept and several maintainers agree that this is the only right approach
  • +
  • existing structures or parameters MAY get extended with additional values (i.e. new flags in a bitmap/mask) if the extensions are optional and do not affect older/newer client/server combinations
  • +
+

Patches that do not satisfy the above requirements can still be submitted for review, but cannot be merged.

+

Release procedure

+

This procedure is followed by a release maintainer/manager, to perform the actual release.

+

The release procedure for both major releases and minor releases is nearly the same.

+

The procedure for the major releases starts at the beginning of the Stability phase, and for the minor release at the start of the release window.

+

TODO: Add the release verification procedure

+

Release steps

+

The release-manager needs to follow the following steps, to actually perform the release once ready.

+

Create tarball

+
    +
  1. Add the release-notes to the docs/release-notes/ directory in the sources
  2. After merging the release-notes, create a tag like v3.6.2 (see the sketch after this list)
  3. Push the tag to git.gluster.org
  4. Create the tarball with the release job in Jenkins
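A rough sketch of the tagging and pushing steps with plain git commands; the exact tag message and any signing conventions used by release maintainers are assumptions here:

git tag -a v3.6.2 -m "GlusterFS v3.6.2 release"
git push origin v3.6.2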
+

Notify packagers

+

Notify the packagers that we need packages created. Provide the link to the source tarball from the Jenkins release job to the packagers mailinglist. A list of the people involved in the package maintenance for the different distributions is in the MAINTAINERS file in the sources, all of them should be subscribed to the packagers mailinglist.

+

Create a new Tracker Bug for the next release

+

The tracker bugs are used as guidance for blocker bugs and should get created when a release is made. To create one

+
    +
  • Create a new milestone
  • Base the contents on open issues, like the one for glusterfs-8
  • Issues that were not fixed in the previous release but are in its milestone should be moved to the new milestone.
+

Create Release Announcement

+

(Major releases) The Release Announcement is based on the release notes. It needs to indicate:

+
    +
  • What this release's overall focus is
  • +
  • Which versions will stop receiving updates as of this release
  • +
  • Links to the direct download folder
  • +
  • Feature set
  • +
+

Best practice as of version-8 is to create a collaborative version of the release notes that both the release manager and community lead work on together, and the release manager posts to the mailing lists (gluster-users@, gluster-devel@, announce@).

+

Create Upgrade Guide

+

(Major releases) +If required, as in the case of a major release, an upgrade guide needs to be available at the same time as the release. +This document should go under the Upgrade Guide section of the glusterdocs repository.

+

Send Release Announcement

+

Once the Fedora/EL RPMs are ready (and any others that are ready by then), send the release announcement:

diff --git a/Contributors-Guide/Guidelines-For-Maintainers/index.html b/Contributors-Guide/Guidelines-For-Maintainers/index.html

Guidelines For Maintainers

+ +

Guidelines For Maintainers

+

GlusterFS has maintainers, sub-maintainers and release maintainers to manage the project's codebase. Sub-maintainers are the owners of specific areas/components of the source tree. Maintainers operate across all components in the source tree. Release maintainers are the owners of the various release branches (release-x.y) present in the GlusterFS repository.

+

In the guidelines below, release maintainers and sub-maintainers are +also implied when there is a reference to maintainers unless it is +explicitly called out.

+

Guidelines that Maintainers are expected to adhere to

+
    +
  1. Ensure qualitative and timely management of patches sent for review.

  2. For merging patches into the repository, it is expected of maintainers to:

    • Merge patches of owned components only.
    • Seek approvals from all maintainers before merging a patchset spanning multiple components.
    • Ensure that regression tests pass for all patches before merging.
    • Ensure that regression tests accompany all patch submissions.
    • Ensure the related Bug or GitHub Issue has sufficient details about the cause of the problem, or a description of the change being introduced.
    • Ensure that documentation is updated for any noticeable change in user-perceivable behavior or design.
    • Encourage code unit tests from patch submitters to improve the overall quality of the codebase.
    • Not merge patches written by themselves until there is a +2 Code Review vote by other reviewers.

  3. The responsibility of merging a patch into a release branch in normal circumstances lies with the release maintainer. Only in exceptional situations will maintainers and sub-maintainers merge patches into a release branch.

  4. Release maintainers will ensure approval from the appropriate maintainers before merging a patch into a release branch.

  5. Maintainers have a responsibility to the community; it is expected of maintainers to:

    • Facilitate the community in all aspects.
    • Be very active and visible in the community.
    • Be objective and consider the larger interests of the community ahead of individual interests.
    • Be receptive to user feedback.
    • Address concerns and issues affecting users.
    • Lead by example.
+

Queries on Guidelines

+

Any questions or comments regarding these guidelines can be routed to the gluster-devel mailing list or the Slack channel.

+

Patches in Github

+

GitHub can be used to list patches that need review and/or can be merged, via the Pull Requests page.


Workflow Guide

+

Bug Handling

+ +

Release Process

+ +

Patch Acceptance

+ +

Blogging about gluster


Backport Guidelines

+

In the GlusterFS project, as a policy, any new change, bug fix, etc., is to be made in the 'devel' branch before the release branches. When a bug is fixed in the devel branch, the fix might be desirable or necessary in a release branch as well.

+

This page describes the policy GlusterFS has regarding backports. As a user or contributor, being aware of this policy will help you understand how to request a backport from the community.

+

Policy

+
    +
  • No feature from devel will be backported to a release branch.
  • A CVE, i.e., a security vulnerability (listed in the CVE database) reported in existing releases, will be backported after getting fixed in the devel branch.
  • Only issues that bring about data loss or unavailability will be backported to a release.
  • For any other issues, the project recommends that the installation be upgraded to a newer release where the specific bug has been addressed.
  • Gluster provides 'rolling' upgrade support, i.e., one can upgrade the server version without stopping application I/O, so we recommend migrating to a higher version.
+

Things to pay attention to while backporting a patch.

+

If your patch meets the criteria above, or you are a user who prefers to have a fix backported because your current setup is facing issues, below are the steps you need to take to submit a patch on a release branch.

+
    +
  • The patch should have the same 'Change-Id' as the original devel patch (see the sketch below).
+
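As an illustrative sketch (the branch name and commit id are placeholders), one way to keep the same Change-Id is to cherry-pick the devel commit, since cherry-picking preserves the original commit message, including its Change-Id line:

# Create a local branch from the target release branch (branch names are placeholders)
git checkout -b backport-issueNNNN upstream/release-11

# Cherry-pick the already-merged devel commit; -x records the original commit id
git cherry-pick -x <commit-id-from-devel>

# Verify that the Change-Id line from the devel patch is still present
git log -1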

How to contact release owners?

+

All release owners are part of the 'gluster-devel@gluster.org' mailing list. Please write your expectations for the next release there, so they can be taken into consideration while making the release.


Building GlusterFS

+

This page describes how to build and install GlusterFS.

+

Build Requirements

+

The following packages are required for building GlusterFS,

+
    +
  • GNU Autotools
    • Automake
    • Autoconf
    • Libtool
  • lex (generally flex)
  • GNU Bison
  • OpenSSL
  • libxml2
  • Python 2.x
  • libaio
  • libibverbs
  • librdmacm
  • readline
  • lvm2
  • glib2
  • liburcu
  • cmocka
  • libacl
  • sqlite
  • fuse-devel
  • liburing-devel
+

Fedora

+

The following dnf command installs all the build requirements for +Fedora,

+
dnf install automake autoconf libtool flex bison openssl-devel  \
  libxml2-devel python-devel libaio-devel libibverbs-devel      \
  librdmacm-devel readline-devel lvm2-devel glib2-devel         \
  userspace-rcu-devel libcmocka-devel libacl-devel sqlite-devel \
  fuse-devel redhat-rpm-config rpcgen libtirpc-devel make       \
  libuuid-devel liburing-devel gperftools libcurl-devel
+
+

Ubuntu

+

The following apt-get command will install all the build requirements on +Ubuntu,

+
sudo apt-get install make automake autoconf libtool flex bison  \
  pkg-config libssl-dev libxml2-dev python-dev libaio-dev       \
  libibverbs-dev librdmacm-dev libreadline-dev liblvm2-dev      \
  libglib2.0-dev liburcu-dev libcmocka-dev libsqlite3-dev       \
  libacl1-dev liburing-dev google-perftools
+
+

CentOS / Enterprise Linux v7

+

The following yum command installs the build requirements for CentOS / Enterprise Linux 7,

+
yum install autoconf automake bison cmockery2-devel dos2unix flex   \
  fuse-devel glib2-devel libacl-devel libaio-devel libattr-devel    \
  libcurl-devel libibverbs-devel librdmacm-devel libtirpc-devel     \
  libtool libxml2-devel lvm2-devel make openssl-devel pkgconfig     \
  pyliblzma python-devel python-eventlet python-netifaces           \
  python-paste-deploy python-simplejson python-sphinx python-webob  \
  pyxattr readline-devel rpm-build sqlite-devel systemtap-sdt-devel \
  tar userspace-rcu-devel
+
+

Note: You will need to enable the CentOS Storage SIG repos in order to install the userspace-rcu-devel package. For details, check https://wiki.centos.org/SpecialInterestGroup/Storage

+

Enable repositories for CentOS

+

The following yum command enables needed repositories providing the build requirements:

+

CentOS Stream 8:

+
yum-config-manager --enable powertools --enable devel
+
+
+

CentOS Stream 9:

+
yum-config-manager --enable crb --enable devel
+
+
+

CentOS / Enterprise Linux v8

+

The following yum command installs the build requirements for CentOS / Enterprise Linux 8,

+
yum install autoconf automake bison dos2unix flex fuse-devel glib2-devel   \
  libacl-devel libaio-devel libattr-devel libcurl-devel libibverbs-devel   \
  librdmacm-devel libtirpc-devel libuuid-devel libtool libxml2-devel       \
  lvm2-devel make openssl-devel pkgconfig xz-devel python3-devel           \
  python3-netifaces python3-paste-deploy python3-simplejson python3-sphinx \
  python3-webob python3-pyxattr readline-devel rpm-build sqlite-devel      \
  systemtap-sdt-devel tar userspace-rcu-devel rpcgen liburing-devel
+
+

Building from Source

+

This section describes how to build GlusterFS from source. It is assumed +you have a copy of the GlusterFS source (either from a released tarball +or a git clone). All the commands below are to be run with the source +directory as the working directory.

+

Configuring for building

+

Run the below commands once for configuring and setting up the build +process.

+

Run autogen to generate the configure script.

+
./autogen.sh
+
+

Once autogen completes successfully a configure script is generated. Run +the configure script to generate the makefiles.

+
./configure
+
+

For CentOS 7, use:

+
./configure --without-libtirpc
+
+

If the above build requirements have been installed, running the +configure script should give the below configure summary,

+
GlusterFS configure summary
+===========================
+FUSE client          : yes
+Infiniband verbs     : yes
+epoll IO multiplex   : yes
+argp-standalone      : no
+fusermount           : yes
+readline             : yes
+georeplication       : yes
+Linux-AIO            : yes
+Enable Debug         : no
+Block Device xlator  : yes
+glupy                : yes
+Use syslog           : yes
+XML output           : yes
+Encryption xlator    : yes
+Unit Tests       : no
+Track priv ports     : yes
+POSIX ACLs           : yes
+Data Classification  : yes
+SELinux features     : yes
+firewalld-config     : no
+Experimental xlators : yes
+Events           : yes
+EC dynamic support   : x64 sse avx
+Use memory pools     : yes
+Nanosecond m/atimes  : yes
+Legacy gNFS server   : no
+
+

During development it is good to enable a debug build. To do this run +configure with a '--enable-debug' flag.

+
./configure --enable-debug
+
+

Further configuration flags can be found by running configure with a +'--help' flag,

+
./configure --help
+
+

Please note to enable gNFS use the following flag

+
./configure --enable-gnfs
+
+

If you are looking at contributing by fixing some of the memory issues, +use --enable-asan option

+
./configure --enable-asan
+
+

The above option will build with -fsanitize=address -fno-omit-frame-pointer +options and uses the libasan.so shared library, so that needs to be available.

+

io_uring is introduced on Linux kernel version 5.1. GlusterFS also needs the user space liburing helper library. +If these are not available for your machine or if you wish to build GlusterFS without io_uring support, +use --disable-linux-io_uring option

+
./configure --disable-linux-io_uring
+
+

Building

+

Once configured, GlusterFS can be built with a simple make command.

+
make
+
+

To speed up the build process on a multicore machine, add a '-jN' flag, +where N is the number of parallel jobs.
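For example, to use one parallel job per available CPU core:

make -j$(nproc)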

+

Installing

+

Run 'make install' to install GlusterFS. By default, GlusterFS will be +installed into '/usr/local' prefix. To change the install prefix, give +the appropriate option to configure. If installing into the default +prefix, you might need to use 'sudo' or 'su -c' to install.

+
sudo make install
+
+

NOTE: glusterfs can be installed on any target path. However, the +mount.glusterfs script has to be in /sbin/mount.glusterfs for +mounting via command mount -t glusterfs to work. See -t section +in man 8 mount for more details.
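For instance, once a volume exists, mounting it with mount -t glusterfs might look like this (the server hostname, volume name, and mount point are placeholders):

mount -t glusterfs server1:/myvolume /mnt/glusterfs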

+

Running GlusterFS

+

GlusterFS can only be run as root, so the following commands need to be run as root. If you've installed into the default '/usr/local' prefix, add '/usr/local/sbin' and '/usr/local/bin' to your PATH before running the commands below.

+

A source install will generally not install any init scripts. So you +will need to start glusterd manually. To manually start glusterd just +run,

+
systemctl daemon-reload
systemctl start glusterd
+
+

This will start glusterd and fork it into the background as a daemon process. You can now run 'gluster' commands and make use of GlusterFS.
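For example, a couple of basic commands to check that the CLI can talk to glusterd (the output will depend on your setup):

gluster peer status
gluster volume info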

+

Building packages

+

Building RPMs

+

Building RPMs is really simple. On a RPM based system, for eg. Fedora, +get the source and do the configuration steps as shown in the 'Building +from Source' section. After the configuration step, run the following +steps to build RPMs,

+
cd extras/LinuxRPM
+make glusterrpms
+
+

This will create RPMs from the source in 'extras/LinuxRPM'. (Note: you will need to install the rpmbuild requirements, including rpmbuild and mock.) For Fedora / CentOS / Enterprise Linux 8 the dependencies can be installed via:

+
yum install mock rpm-build  selinux-policy-devel
+

Developers

+

Contributing to the Gluster community

+
+

Are you itching to send in patches and participate as a developer in the +Gluster community? Here are a number of starting points for getting +involved. All you need is your 'github' account to be handy.

+

Remember that the Gluster community has multiple projects, each of which has its own way of handling PRs and patches. Decide which project you want to contribute to. The documents below are mostly about the 'GlusterFS' project, which is the core of the Gluster Community.

+

Workflow

+
    +
  • Simplified Developer Workflow
    • A simpler and faster intro to developing with GlusterFS than the document below.
  • Developer Workflow
    • Covers details about the requirements for a patch, and the tools and toolkits used by developers. This is recommended reading in order to begin contributions to the project.
  • GD2 Developer Workflow
    • Helps in on-boarding developers to contribute to the GlusterD2 project.
+

Compiling Gluster

+ +

Developing

+ +

Releases and Backports

+ +

Some more GlusterFS Developer documentation can be found in glusterfs documentation directory


Development workflow of Gluster

+

This document provides a detailed overview of the development model +followed by the GlusterFS project. For a simpler overview visit +Simplified development workflow.

+

Basics

+

The GlusterFS development model largely revolves around the features and functionality provided by the Git version control system, GitHub, and the Jenkins continuous integration system. This document is a primer for a contributor to the project.

+

Git and Github

+

Git is an extremely flexible, distributed version control system. +GlusterFS's main repository is at Git and at +GitHub. +A good introduction to Git can be found at +http://www-cs-students.stanford.edu/~blynn/gitmagic/.

+

Jenkins

+

Jenkins is a Continuous Integration build system. Jenkins is hosted at +http://build.gluster.org. Jenkins is configured to work with Github by +setting up hooks. Every "Change" which is pushed to Github is +automatically picked up by Jenkins, built and smoke tested. The output of +all builds and tests can be viewed at +http://build.gluster.org/job/smoke/. Jenkins is also set up with a +'regression' job which is designed to execute test scripts provided as +part of the code change.

+

Preparatory Setup

+

Here is a list of initial one-time steps before you can start hacking on +code.

+

Fork Repository

+

Fork GlusterFS repository

+

Clone a working tree

+

Get yourself a working tree by cloning the development repository from

+
git clone git@github.com:${username}/glusterfs.git
+cd glusterfs/
+git remote add upstream git@github.com:gluster/glusterfs.git
+
+

Preferred email and set username

+

On the first login, add your git/work email to your identity. You will have +to click on the URL which is sent to your email and set up a proper Full +Name. Select yourself a username. Make sure you set your git/work email +as your preferred email. This should be the email address from which all your +code commits are associated.

+

Watch glusterfs

+

In Github, watch the 'glusterfs' repository. Tick on suitable +(All activity, Ignore, participating, or custom) type of notifications to +get alerts.

+

Email filters

+

Set up a filter rule in your mail client to tag or classify emails with +the header

+
list: <glusterfs.gluster.github.com>
+
+

as mails originating from the github system.

+

Development & Other flows

+

Issue

+
    +
  • Make sure there is an issue filed for the task you are working on.
  • If it is not filed, open the issue with a full description.
  • If it is a bug fix, add the label "Type:Bug".
  • If it is an RFC, provide all the documentation and request the "DocApproved" and "SpecApproved" labels.
+

Code

+
    +
  • Start coding
  • +
  • Make sure clang-format is installed and is run on the patch.
  • +
+

Keep up-to-date

+
    +
  • GlusterFS is a large project with many developers, so there will be one patch or another every day.
  • It is critical for a developer to be up-to-date with the devel repo to be conflict-free when a PR is opened.
  • Git provides many options to keep up-to-date; below is one of them:
+
git fetch upstream
+git rebase upstream/devel
+
+

Branching policy

+

This section describes both the branching policies on the public repo and the suggested best practice for local branching.

+

Devel/release branches

+

In glusterfs, the 'devel' branch is the forward development branch. +This is where new features come in first. In fact this is where almost +every change (commit) comes in first. The devel branch is always kept +in a buildable state and smoke tests pass.

+

Release trains (3.1.z, 3.2.z,..., 8.y, 9.y) each have a branch originating from +devel. Code freeze of each new release train is marked by the creation +of the release-x.y branch. At this point, no new features are added to +the release-x.y branch. All fixes and commits first get into devel. +From there, only bug fixes get backported to the relevant release +branches. From the release-x.y branch, actual release code snapshots +(e.g. glusterfs-3.2.2 etc.) are tagged (git annotated tag with 'git tag +-a') shipped as a tarball.

+

Personal per-task branches

+

As a best practice, it is recommended you perform all code changes for a +task in a local branch in your working tree. The local branch should be +created from the upstream branch to which you intend to submit the +change. The name of the branch on your personal fork can start with issueNNNN, +followed by anything of your choice. If you are submitting changes to the devel +branch, first create a local task branch like this -

+
# git checkout -b issueNNNN upstream/main
+... <hack, commit>
+
+

Building

+

Environment Setup

+

For details about the required packages for the build environment +refer : Building GlusterFS

+

Creating build environment

+

Once the required packages are installed for your system, generate the build configuration:

+
./autogen.sh
+./configure --enable-fusermount
+
+

Build and install

+
make && make install
+
+

Commit policy / PR description

+

Typically you would have a local branch per task. You will need to +sign-off your commit (git commit -s) before sending the +patch for review. By signing off your patch, you agree to the terms +listed under the "Developer's Certificate of Origin" section in the +CONTRIBUTING file available in the repository root.

+

Provide a meaningful commit message. Your commit message should be in +the following format

+
    +
  • A short one-line title of the format 'component: title', describing what the patch accomplishes
  • An empty line following the subject
  • The situation necessitating the patch
  • A description of the code changes
  • The reason for doing it this way (compared to others)
  • A description of test cases
  • When you open a PR, having a reference Issue for the commit is mandatory in GlusterFS.
  • The commit message can have either Fixes: #NNNN or Updates: #NNNN on a separate line, where NNNN is the Issue ID in the glusterfs repository.
  • Each commit needs the author to have the 'Signed-off-by: Name <email>' line. You can do this with the -s option of git commit.
  • If the PR is not ready for review, apply the label "work-in-progress". If the "Draft PR" option is available to you, use that instead.
+
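For illustration, a hypothetical commit message following this format could look like the sketch below (the component, description, issue number, and author are made up):

cli: fix crash on empty volume name

'gluster volume info' crashed when invoked with an empty volume
name because the name was used before being validated.

Validate the volume name before building the request and return a
usage error instead. A regression test for the empty-name case is
added.

Fixes: #9999
Signed-off-by: Jane Doe <jane.doe@example.com>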

Push the change

+

After doing the local commit, it is time to submit the code for review. There is a script available inside glusterfs.git called rfc.sh. It is recommended that you keep pushing to your repo every day so you don't lose any work. You can submit your changes for review by simply executing

+
./rfc.sh
+
+

or

+
git push origin HEAD:issueNNN
+
+

This script rfc.sh does the following:

+
    +
  • The first time it is executed, it downloads a git hook from http://review.gluster.org/tools/hooks/commit-msg and sets it up locally to generate a Change-Id: tag in your commit message (if one was not already generated). A manual equivalent is sketched after this list.
  • Rebase your commit against the latest upstream HEAD. This rebase also causes your commits to undergo massaging from the just-downloaded commit-msg hook.
  • Prompt for a Reference Id for each commit (if it was not already provided) and include it as a "fixes: #n" tag in the commit log. You can just hit Enter at this prompt if your submission is purely for review purposes.
  • Push the changes for review. On a successful push, you will see a URL pointing to the change in the Pull requests section.
+
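A manual equivalent of the hook setup performed by rfc.sh, for reference (run from the root of your glusterfs clone; the URL is the one mentioned above):

# Download the commit-msg hook that adds the Change-Id: tag and make it executable
curl -Lo .git/hooks/commit-msg http://review.gluster.org/tools/hooks/commit-msg
chmod +x .git/hooks/commit-msg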

Test cases and Verification

+
+

Auto-triggered tests

+

The integration between Jenkins and Github triggers an event in Jenkins +on every push of changes, to pick up the change and run build and smoke +test on it. +Part of the workflow is to aggregate and execute pre-commit test cases +that accompany patches, cumulatively for every new patch. This +guarantees that tests that are working till the present are not broken +with the new patch. This is so that code changes and accompanying test +cases are reviewed together. Once you upload the patch -

+
    +
  1. All the required smoke tests will be auto-triggered. You can retrigger the smoke tests using "/recheck smoke" as a comment. Passing the automated smoke test is a necessary condition but not a sufficient one.

  2. The regression tests will be triggered by a comment "/run regression" from developers in the @gluster organization, once the smoke test has passed.
+

If smoke/regression fails, it is a good reason to skip code review till a fixed change is pushed later. You can click on the build URL to inspect the reason for the automated verification failure. In the Jenkins job page, you can click on the 'Console Output' link to see the exact point of failure.

+

All code changes which are not trivial (typo fixes, code comment +changes) must be accompanied with either a new test case script or +extend/modify an existing test case script. It is important to review +the test case in conjunction with the code change to analyze whether the +code change is actually verified by the test case.

+

Regression tests (i.e, execution of all test cases accumulated with +every commit) is not automatically triggered as the test cases can be +extensive and is quite expensive to execute for every change submission +in the review/resubmit cycle. Passing the regression test is a +necessary condition for merge along with code review points.

+

To check and run all regression tests locally, run the below script +from glusterfs root directory.

+
./run-tests.sh
+
+

To run a single regression test locally, run the below command.

+
prove -vf <path_to_the_file>
+
+
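For example, to run one test (the exact path below is only an illustration; pick any .t file that exists under tests/ in your clone):

prove -vf tests/basic/mount.t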

NOTE: The testing framework needs perl-Test-Harness package to be installed. +Ask for help as comment in PR if you have any questions about the process!

+

It is important to note that Jenkins verification is only a generic +verification of high-level tests. More concentrated testing effort for +the patch is necessary with manual verification.

+

Glusto test framework

+

For any new feature that is posted for review, there should be +accompanying set of tests in +glusto-tests. These +tests will be run nightly and/or before release to determine the health +of the feature. Please go through glusto-tests project to understand +more information on how to write and execute the tests in glusto.

+
    +
  1. Extend/Modify old test cases in existing scripts - This is typically when the present behavior (default values etc.) of the code is changed.

  2. No test cases - This is typically when a code change is trivial (e.g. fixing typos in output strings, code comments).

  3. Only test case and no code change - This is typically when we are adding test cases to old code (already existing before this regression test policy was enforced). More details on how to work with test case scripts can be found in tests/README.
+

Reviewing / Commenting

+

Code review with Github is relatively easy compared to other available +tools. Each change is presented as multiple files and each file can be +reviewed in Side-by-Side mode. While reviewing it is possible to comment +on each line by clicking on '+' icon and writing in your comments in +the text box. Such in-line comments are saved as drafts, till you +finally publish them by Starting a Review.

+

Incorporate, rfc.sh, Reverify

+

Code review comments are notified via email. After incorporating the +changes in code, you can mark each of the inline comments as 'done' +(optional). After all the changes to your local files, create new +commits in the same branch with -

+
git commit -a -s
+
+

Push the commit by executing rfc.sh. If your previous push was an "rfc" +push (i.e, without a Issue Id) you will be prompted for a Issue Id +again. You can re-push an rfc change without any other code change too +by giving a Issue Id.

+

On the new push, Jenkins will re-verify the new change (independent of +what the verification result was for the previous push).

+

It is the Change-Id line in the commit log (which does not change) that +associates the new push as an update for the old push (even though they +had different commit ids) under the same Change.

+

If further changes are found necessary, changes can be requested or +comments can be made on the new patch as well, and the same cycle repeats.

+

If no further changes are necessary, the reviewer can approve the patch.

+

Submission Qualifiers

+

GlusterFS project follows 'Squash and Merge' method.

+
    +
  • This is mainly to preserve the historic Gerrit method of one patch in git log for one URL link.
  • +
  • This also makes every merge a complete patch, which has passed all tests.
  • +
+

For a change to get merged, there are two qualifiers that are enforced +by the Github system. They are -

+
    +
  1. A change should have an Approve flag from reviewers.
  2. A change should have passed the smoke and regression tests.
+

The project maintainer will merge the changes once a patch +meets these qualifiers. If you feel there is delay, feel free +to add a comment, discuss the same in Slack channel, or send email.

+

Submission Disqualifiers

+
    +
  • +2: equivalent to "Approve" from the people in the maintainers group.
  • +1: can be given by a maintainer/reviewer by explicitly stating that in the comment.
  • -1: provide details on the required changes and pick "Request Changes" while submitting your review.
  • -2: done by adding the DO-NOT-MERGE label.
+

Any further discussions can happen as comments in the PR.


Easy Fix Bugs

+

Fixing easy issues is an excellent method to start contributing patches to Gluster.

+

Sometimes an Easy Fix issue has a patch attached. In those cases, +the Patch keyword has been added to the bug. These bugs can be +used by new contributors that would like to verify their workflow. Bug +1099645 is one example of those.

+

All such issues can be found here

+

Guidelines for new comers

+
    +
  • While trying to write a patch, do not hesitate to ask questions.
  • +
  • If something in the documentation is unclear, we do need to know so + that we can improve it.
  • +
  • There are no stupid questions, and it's more stupid to not ask + questions that others can easily answer. Always assume that if you + have a question, someone else would like to hear the answer too.
  • +
+

Reach out to the developers +in #gluster on Gluster Slack channel, or on +one of the mailing lists, try to keep the discussions public so that anyone +can learn from it.


Fixing issues reported by tools for static code analysis

+ +

Static Code Analysis Tools

+

Bug fixes for issues reported by Static Code Analysis Tools should +follow Development Work Flow

+

Coverity

+

GlusterFS is part of Coverity's scan +program.

+
    +
  • To see Coverity issues you have to be a member of the GlusterFS project on the Coverity scan website.
  • Here is the link to the Coverity scan website.
  • Go to the above link and subscribe to the GlusterFS project (as a contributor). It will send a request to the admins to include you in the project.
  • Once the admins for the GlusterFS Coverity scan approve your request, you will be able to see the defects raised by Coverity.
  • Issue #1060 can be used as an umbrella bug for Coverity issues in the master branch, unless you are trying to fix a specific issue.
  • When you decide to work on some issue, please assign it to your name on the same Coverity website, so that we don't step on each other's work.
  • When marking a bug intentional on the Coverity scan website, please put in an explanation for it, so that it helps others understand the reasoning behind it.
+

If you have more questions please send it to +gluster-devel mailing +list

+

CPP Check

+

Cppcheck is available in Fedora and EL's EPEL repo

+
    +
  • Install Cppcheck

    dnf install cppcheck

  • Clone the GlusterFS code

    git clone https://github.com/gluster/glusterfs

  • Run Cppcheck

    cppcheck glusterfs/ 2>cppcheck.log
+

Clang-Scan Daily Runs

+

We have daily runs of static source code analysis tool clang-scan on +the glusterfs sources. There are daily analyses of the master and +on currently supported branches.

+

Results are posted at +https://build.gluster.org/job/clang-scan/lastBuild/clangScanBuildBugs/

+

Issue #1000 can be used as an umbrella bug for Clang issues in the master branch, unless you are trying to fix a specific issue.


Projects

+

This page contains a list of project ideas which will be suitable for +students (for GSOC, internship etc.)

+

Projects/Features which need contributors

+

RIO

+

Issue: https://github.com/gluster/glusterfs/issues/243

+

This is a new distribution logic, which can scale Gluster to 1000s of nodes.

+

Composition xlator for small files

+

Merge small files into a designated large file using our own custom +semantics. This can improve our small file performance.

+

Path based geo-replication

+

Issue: https://github.com/gluster/glusterfs/issues/460

+

This would allow remote volume to be of different type (NFS/S3 etc etc) too.

+

Project Quota support

+

Issue: https://github.com/gluster/glusterfs/issues/184

+

This will make Gluster's Quota faster, and also provide desired behavior.

+

Cluster testing framework based on gluster-tester

+

Repo: https://github.com/aravindavk/gluster-tester

+

Build a cluster using docker images (or VMs). Write a tool which would +extend current gluster testing's .t format to take NODE as an addition +parameter to run command. This would make upgrade and downgrade testing +very easy and feasible.

+

Network layer changes

+

Issue: https://github.com/gluster/glusterfs/issues/391

+

There are many improvements we can make in this area.


Simplified development workflow for GlusterFS

+

This page gives a simplified model of the development workflow used by +the GlusterFS project. This will give the steps required to get a patch +accepted into the GlusterFS source.

+

Visit Development Work Flow for a more detailed description of the workflow.

+

Initial preparation

+

The GlusterFS development workflow revolves around GitHub and Jenkins. Using both of these tools requires some initial preparation.

+

Get the source

+

Git clone the GlusterFS source using

+
git clone git@github.com:${username}/glusterfs.git
+cd glusterfs/
+git remote add upstream git@github.com:gluster/glusterfs.git
+
+

This will clone the GlusterFS source into a subdirectory named glusterfs +with the devel branch checked out.

+

Dev system setup

+

You should install and set up Git on your development system. Use your distribution-specific package manager to install git. After installation, configure git. At the minimum, set a git user email. To set the email do,

+
git config --global user.name <name>
+git config --global user.email <email address>
+
+

Next, install the build requirements for GlusterFS. Refer +Building GlusterFS - Build Requirements +for the actual requirements.

+

Actual development

+

The commands in this section are to be run inside the glusterfs source +directory.

+

Create a development branch

+

It is recommended to use separate local development branches for each +change you want to contribute to GlusterFS. To create a development +branch, first checkout the upstream branch you want to work on and +update it. More details on the upstream branching model for GlusterFS +can be found at Development Work Flow - Branching_policy. +For example if you want to develop on the devel branch,

+
git checkout devel
+git pull
+
+

Now, create a new branch from devel and switch to the new branch. It is +recommended to have descriptive branch names. Do,

+
git branch issueNNNN
+git checkout issueNNNN
+
+

or,

+
git checkout -b issueNNNN upstream/main
+
+

to do both in one command. Here, NNNN is the Issue ID in glusterfs repository.

+

Hack

+

Once you've switched to the development branch, you can perform the +actual code changes. Build and test to +see if your changes work.

+

Tests

+

Unless your changes are very minor and trivial, you should also add a +test for your change. Tests are used to ensure that the changes you did +are not broken inadvertently. More details on tests can be found at +Development Workflow - Test cases +and +Development Workflow - Regression tests and test cases.

+

Regression test

+

Once your change is working locally, you can run the regression test suite to make sure you haven't broken anything. The regression test suite requires a working GlusterFS installation and needs to be run as root. To run the regression test suite, do

+
make install
+./run-tests.sh
+
+

Or, after uploading the patch, the regression tests can be triggered by a comment "/run regression" from developers in the @gluster organization.

+

Commit your changes

+

If you haven't broken anything, you can now commit your changes. First +identify the files that you modified/added/deleted using git-status and +stage these files.

+
git status
+git add <list of modified files>
+
+

Now, commit these changes using

+
git commit -s
+
+

Provide a meaningful commit message. The commit message policy is +described at Development Work Flow - Commit policy. +It is essential that you commit with the '-s' option, which will +sign-off the commit with your configured email.

+

Submit for review

+

To submit your change for review, run the rfc.sh script,

+
./rfc.sh
+
+

or

+
git push origin HEAD:issueNNN
+
+

More details on the rfc.sh script are available at +Development Work Flow - rfc.sh.

+

Review process

+

Your change will now be reviewed by the GlusterFS maintainers and +component owners. You can follow and take part in the review process +on the change at the review url. The review process involves several steps.

+

To know the component owners, you can check the "MAINTAINERS" file in the root of the glusterfs code directory.

+

Automated verification

+

Every change submitted to github triggers an initial automated +verification on jenkins known as smoke tests. +The automated verification ensures that your change doesn't break the build +and has an associated bug-id. Developers get a chance to retrigger the smoke tests using "/recheck smoke" as comment.

+

More details can be found at Development Work Flow - Auto verification.

+

Formal review

+

Once the auto verification is successful, the component owners will +perform a formal review. If they are okay with your change, they will +give a positive review. If not they will give a negative review and add +comments on the reasons.

+

More information regarding the review qualifiers and disqualifiers is +available at Development Work Flow - Submission Qualifiers +and +Development Work Flow - Submission Disqualifiers.

+

If your change gets a negative review, you will need to address the +comments and resubmit your change.

+

Resubmission

+

Switch to your development branch and make new changes to address the +review comments. Build and test to see if the new changes are working.

+

Stage your changes and commit your new changes in new commits using,

+
git commit -a -s
+
+

Now you can resubmit the commit for review using the rfc.sh script or git push.

+

The formal review process could take a long time. To increase chances +for a speedy review, you can add the component owners as reviewers on +the pull request. This will ensure they notice the change. The +list of component owners can be found in the MAINTAINERS file present in +the GlusterFS source

+

Verification

+

After a component owner has given a positive review, a developer will +run the regression test suite on your change to verify that your change +works and hasn't broken anything. This verification is done with the +help of jenkins.

+

If the verification fails, you will need to make necessary changes and +resubmit an updated commit for review.

+

Acceptance

+

After successful verification, a maintainer will Squash and merge +your change into the upstream GlusterFS source. Your change +will now be available in the upstream git repo for everyone to use.


Compiling RPMS

+ +

How to compile GlusterFS RPMs from git source, for RHEL/CentOS, and Fedora

+

Creating rpm's of GlusterFS from git source is fairly easy, once you know the steps.

+

RPMs can be compiled on at least the following OS's:

+
    +
  • Red Hat Enterprise Linux 5, 6 (& 7 when available)
  • +
  • CentOS 5, 6, 7 and 8
  • +
  • Fedora 16-20
  • +
+

Specific instructions for compiling are below. If you're using:

+
    +
  • Fedora 16-20 - Follow the Fedora steps, then do all of the Common steps.
  • +
  • CentOS 5.x - Follow the CentOS 5.x steps, then do all of the Common steps
  • +
  • CentOS 6.x - Follow the CentOS 6.x steps, then do all of the Common steps.
  • +
  • CentOS 8.x - Follow the CentOS 8.x steps, then follow from step 2 in the Common steps.
  • +
  • RHEL 6.x - Follow the RHEL 6.x steps, then do all of the Common steps.
  • +
+

Note - these instructions have been explicitly tested on all of CentOS 5.10, RHEL 6.4, CentOS 6.4+, CentOS 8.4, and Fedora 16-20. Other releases of RHEL/CentOS and Fedora may work too but haven't been tested. Please update this page appropriately if you do so. :)

+

Preparation steps for Fedora 16-20 (only)

+
    +
  1. +

    Install gcc, the python development headers, and python setuptools:

    +
    sudo yum -y install gcc python-devel python-setuptools
    +
    +
  2. +
  3. +

    If you're compiling GlusterFS version 3.4, then install python-swiftclient. Other GlusterFS versions don't need it:

    +
    sudo easy_install simplejson python-swiftclient
    +
    +
  4. +
+

Now follow through with the Common Steps part below.

+

Preparation steps for CentOS 5.x (only)

+

You'll need EPEL installed first and some CentOS-specific packages. The commands below will get that done for you. After that, follow through the "Common steps" section.

+
    +
  1. +

    Install EPEL first:

    +
    curl -OL http://download.fedoraproject.org/pub/epel/5/x86_64/epel-release-5-4.noarch.rpm
    +sudo yum -y install epel-release-5-4.noarch.rpm --nogpgcheck
    +
    +
  2. +
  3. +

    Install the packages required only on CentOS 5.x:

    +
    sudo yum -y install buildsys-macros gcc ncurses-devel \
    +  python-ctypes python-sphinx10 redhat-rpm-config
    +
    +
  4. +
+

Now follow through with the Common Steps part below.

+

Preparation steps for CentOS 6.x (only)

+

You'll need EPEL installed first and some CentOS-specific packages. The commands below will get that done for you. After that, follow through the "Common steps" section.

+
    +
  1. +

    Install EPEL first:

    +
    sudo yum -y install http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
    +
    +
  2. +
  3. +

    Install the packages required only on CentOS:

    +
    sudo yum -y install python-webob1.0 python-paste-deploy1.5 python-sphinx10 redhat-rpm-config
    +
    +
  4. +
+

Now follow through with the Common Steps part below.

+

Preparation steps for CentOS 8.x (only)

+

You'll need EPEL installed and then the powertools package enabled.

+
    +
  1. +

    Install EPEL first:

    +
    sudo rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
    +
    +
  2. +
  3. +

    Enable the PowerTools repo and install CentOS 8.x specific packages for building the rpms.

    +
    sudo yum --enablerepo=PowerTools install automake autoconf libtool flex bison openssl-devel \
    +  libxml2-devel libaio-devel libibverbs-devel librdmacm-devel readline-devel lvm2-devel \
    +  glib2-devel userspace-rcu-devel libcmocka-devel libacl-devel sqlite-devel fuse-devel \
    +  redhat-rpm-config rpcgen libtirpc-devel make python3-devel rsync libuuid-devel \
    +  rpm-build dbench perl-Test-Harness attr libcurl-devel selinux-policy-devel -y
    +
    +
  4. +
+

Now follow through from Point 2 in the Common Steps part below.

+

Preparation steps for RHEL 6.x (only)

+

You'll need EPEL installed first and some RHEL specific packages. The 2 commands below will get that done for you. After that, follow through the "Common steps" section.

+
    +
  1. +

    Install EPEL first:

    +
    sudo yum -y install http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
    +
    +
  2. +
  3. +

    Install the packages required only on RHEL:

    +
    sudo yum -y --enablerepo=rhel-6-server-optional-rpms install python-webob1.0 \
    +  python-paste-deploy1.5 python-sphinx10 redhat-rpm-config
    +
    +
  4. +
+

Now follow through with the Common Steps part below.

+

Common Steps

+

These steps are for both Fedora and RHEL/CentOS. At the end you'll have the complete set of GlusterFS RPMs for your platform, ready to be installed.

+

NOTES for step 1 below:

+
    +
  • If you're on RHEL/CentOS 5.x and get a message about lvm2-devel not being available, it's ok. You can ignore it. :)
  • +
  • If you're on RHEL/CentOS 6.x and get any messages about python-eventlet, python-netifaces, python-sphinx and/or pyxattr not being available, it's ok. You can ignore them. :)
  • +
  • If you're on CentOS 8.x, you can skip step 1 and start from step 2. Also, for CentOS 8.x, the steps have been + tested for the master branch. It is unknown if it would work for older branches.
  • +
+


+
    +
  1. +

    Install the needed packages

    +
    sudo yum -y --disablerepo=rhs* --enablerepo=*optional-rpms install git autoconf \
    +  automake bison dos2unix flex fuse-devel glib2-devel libaio-devel \
    +  libattr-devel libibverbs-devel librdmacm-devel libtool libxml2-devel lvm2-devel make \
    +  openssl-devel pkgconfig pyliblzma python-devel python-eventlet python-netifaces \
    +  python-paste-deploy python-simplejson python-sphinx python-webob pyxattr readline-devel \
    +  rpm-build systemtap-sdt-devel tar libcmocka-devel
    +
    +
  2. +
  3. +

    Clone the GlusterFS git repository

    +
    git clone git://git.gluster.org/glusterfs
    +cd glusterfs
    +
    +
  4. +
  5. +

    Choose which branch to compile

    +

    If you want to compile the latest development code, you can skip this step and go on to the next one. :)

    +

    If instead, you want to compile the code for a specific release of GlusterFS (such as v3.4), get the list of release names here:

    +
    # git branch -a | grep release
    +remotes/origin/release-2.0
    +remotes/origin/release-3.0
    +remotes/origin/release-3.1
    +remotes/origin/release-3.2
    +remotes/origin/release-3.3
    +remotes/origin/release-3.4
    +remotes/origin/release-3.5
    +
    +

    Then switch to the correct release using the git "checkout" command, and the name of the release after the "remotes/origin/" bit from the list above:

    +
    git checkout release-3.4
    +
    +

    NOTE - The CentOS 5.x instructions have only been tested for the master branch in GlusterFS git. It is unknown (yet) if they work for branches older than release-3.5.

    +
    +

    If you are compiling the latest development code you can skip steps 4 and 5. Instead, you can run the below command and you will get the RPMs.

    +
    extras/LinuxRPM/make_glusterrpms
    +
    +
    +
  6. +
  7. +

    Configure and compile GlusterFS

    +

    Now you're ready to compile Gluster:

    +
    ./autogen.sh
    +./configure --enable-fusermount
    +make dist
    +
    +
  8. +
  9. +

    Create the GlusterFS RPMs

    +
    cd extras/LinuxRPM
    +make glusterrpms
    +
    +

    That should complete with no errors, leaving you with a directory containing the RPMs.

    +
    # ls -l *rpm
    +-rw-rw-r-- 1 jc jc 3966111 Mar  2 12:15 glusterfs-3git-1.el5.centos.src.rpm
    +-rw-rw-r-- 1 jc jc 1548890 Mar  2 12:17 glusterfs-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc   66680 Mar  2 12:17 glusterfs-api-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc   20399 Mar  2 12:17 glusterfs-api-devel-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc  123806 Mar  2 12:17 glusterfs-cli-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc 7850357 Mar  2 12:17 glusterfs-debuginfo-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc  112677 Mar  2 12:17 glusterfs-devel-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc  100410 Mar  2 12:17 glusterfs-fuse-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc  187221 Mar  2 12:17 glusterfs-geo-replication-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc  299171 Mar  2 12:17 glusterfs-libs-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc   44943 Mar  2 12:17 glusterfs-rdma-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc  123065 Mar  2 12:17 glusterfs-regression-tests-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc   16224 Mar  2 12:17 glusterfs-resource-agents-3git-1.el5.centos.x86_64.rpm
    +-rw-rw-r-- 1 jc jc  654043 Mar  2 12:17 glusterfs-server-3git-1.el5.centos.x86_64.rpm
    +
    +

Get core dump on a customer set up without killing the process

+

Why do we need this?

+

Finding the root cause of an issue that occurred in the customer/production setup is a challenging task. +Most of the time we cannot replicate/setup the environment and scenario which is leading to the issue on +our test setup. In such cases, we got to grab most of the information from the system where the problem +has occurred.

+

What information we look for and also useful?

+

The information like a core dump is very helpful to catch the root cause of an issue by adding ASSERT() in +the code at the places where we feel something is wrong and install the custom build on the affected setup. +But the issue is ASSERT() would kill the process and produce the core dump.

+

Is it a good idea to do ASSERT() on customer setup?

+

Remember, we are seeking help from a customer setup; they are unlikely to agree to killing the process and producing a core dump for us to root-cause the issue. It affects the customer's business, and nobody agrees to such a proposal.

+

What if we have a way to produce a core dump without a kill?

+

Yes, GlusterFS provides a way to do this. Gluster has a customized ASSERT(), i.e. GF_ASSERT(), in place, which helps in producing the core dump without killing the associated process, and it also provides a script which can be run on the customer setup to produce the core dump without harming the running process. (This presumes we already have GF_ASSERT() at the expected place in the build currently running on the customer setup. If not, we need to install a custom build with GF_ASSERT() added on that setup.)

+

Is GF_ASSERT() newly introduced in Gluster code?

+

No. GF_ASSERT() was already in the codebase before this improvement. In a debug build, GF_ASSERT() kills the process and produces the core dump, but in a production build it just logs the error and moves on. What we have done is change the implementation so that in a production build we also get a core dump, but the process is not killed. For code places that GF_ASSERT() does not yet cover, please add it as per the requirement.

+

Here are the steps to achieve the goal:

+
    +
  • Add GF_ASSERT() in the Gluster code path where you expect something wrong is happening.
  • Build the Gluster code, install it, and mount the Gluster volume (for detailed steps refer to the Gluster quick start guide).
  • Now, in another terminal, run the gfcore.py script: # ./extras/debug/gfcore.py $PID 1 /tmp/ ($PID is the PID of the gluster process you are interested in, obtained with ps -ef | grep gluster. For more details, check # ./extras/debug/gfcore.py --help).
  • Hit the code path where you have introduced GF_ASSERT(). If GF_ASSERT() is in the fuse_write() path, you can hit the code path by writing to a file present under the Gluster mount. Ex: # dd if=/dev/zero of=/mnt/glusterfs/abcd bs=1M count=1, where /mnt/glusterfs is the gluster mount.
  • Go to the terminal where gdb is running (step 3) and observe that the gdb process has terminated.
  • Go to the directory where the core dump is produced. The default is the present working directory.
  • Access the core dump using gdb. Ex: # gdb -ex "core-file $GFCORE_FILE" $GLUSTER_BINARY (the 1st argument is the core file name and the 2nd is the gluster binary, i.e. the o/p of the file command in the previous step).
  • Observe that the Gluster process is unaffected by checking its process state: check the pid status using ps -ef | grep gluster. (A consolidated sketch of these steps follows this list.)
+
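As a consolidated sketch of the flow above (the PID, mount point and output directory are placeholders; adjust them to your setup):

# Attach to the running Gluster process and wait for one GF_ASSERT() hit
./extras/debug/gfcore.py $PID 1 /tmp/

# In another terminal, trigger the instrumented code path, e.g. for fuse_write()
dd if=/dev/zero of=/mnt/glusterfs/abcd bs=1M count=1

# Inspect the resulting core file; the process itself was not killed
gdb -ex "core-file $GFCORE_FILE" $GLUSTER_BINARY

# Confirm the Gluster process is still running
ps -ef | grep gluster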

Thanks to Xavi Hernandez (jahernan@redhat.com) for the idea. This will ease the life of many Gluster developers and maintainers.

diff --git a/GlusterFS-Tools/gfind-missing-files/index.html b/GlusterFS-Tools/gfind-missing-files/index.html
new file mode 100644
index 00000000..c0925e7f
--- /dev/null
+++ b/GlusterFS-Tools/gfind-missing-files/index.html

gfind missing files

+ +

Introduction

+

The tool gfind_missing_files.sh can be used to find the missing files in a +GlusterFS geo-replicated secondary volume. The tool uses a multi-threaded crawler +operating on the backend .glusterfs of a brickpath which is passed as one of +the parameters to the tool. It does a stat on each entry in the secondary volume +mount to check for the presence of a file. The tool uses the aux-gfid-mount +thereby avoiding path conversions and potentially saving time.

+

This tool should be run on every node and each brickpath in a geo-replicated +primary volume to find the missing files on the secondary volume.

+

The script gfind_missing_files.sh is a wrapper script that in turn uses the +gcrawler binary to do the backend crawling. The script detects the gfids of +the missing files and runs the gfid-to-path conversion script to list out the +missing files with their full pathnames.

+

Usage

+
bash gfind_missing_files.sh <BRICK_PATH> <SECONDARY_HOST> <SECONDARY_VOL> <OUTFILE>
+            BRICK_PATH     -   Full path of the brick
+            SECONDARY_HOST -   Hostname of gluster volume
+            SECONDARY_VOL  -   Gluster volume name
+            OUTFILE        -    Output file which contains gfids of the missing files
+
+

The gfid-to-path conversion uses a quicker algorithm for converting gfids to +paths and it is possible that in some cases all missing gfids may not be +converted to their respective paths.

+

Example output(126733 missing files)

+
# ionice -c 2 -n 7 ./gfind_missing_files.sh /bricks/m3 acdc secondary-vol ~/test_results/m3-4.txt
+Calling crawler...
+Crawl Complete.
+gfids of skipped files are available in the file /root/test_results/m3-4.txt
+Starting gfid to path conversion
+Path names of skipped files are available in the file /root/test_results/m3-4.txt_pathnames
+WARNING: Unable to convert some GFIDs to Paths, GFIDs logged to /root/test_results/m3-4.txt_gfids
+Use bash gfid_to_path.sh <brick-path> /root/test_results/m3-4.txt_gfids to convert those GFIDs to Path
+Total Missing File Count : 126733
+
+

In such cases, an additional step is needed to convert those gfids to paths. +This can be used as shown below:

+
bash gfid_to_path.sh <BRICK_PATH> <GFID_FILE>
+             BRICK_PATH - Full path of the brick.
+             GFID_FILE  - OUTFILE_gfids got from gfind_missing_files.sh
+
+
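For instance, continuing the example output above, the GFIDs that could not be converted would be handled with:

bash gfid_to_path.sh /bricks/m3 /root/test_results/m3-4.txt_gfids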

Things to keep in mind when running the tool

+
  1. Running this tool can result in a crawl of the backend filesystem at each
     brick, which can be intensive. To ensure there is no impact on ongoing I/O on
     the volumes, we recommend that this tool be run at a low I/O scheduling class
     (best-effort) and priority.

     ionice -c 2 -p <pid of gfind_missing_files.sh>

  2. We do not recommend interrupting the tool while it is running
     (e.g. by pressing CTRL^C). It is better to wait for the tool to finish
     execution. In case it is interrupted, manually unmount the secondary volume.

     umount <MOUNT_POINT>
diff --git a/GlusterFS-Tools/glusterfind/index.html b/GlusterFS-Tools/glusterfind/index.html
new file mode 100644
index 00000000..debfa481
--- /dev/null
+++ b/GlusterFS-Tools/glusterfind/index.html

glusterfind - A tool to find Modified files/dirs

+

glusterfind is a tool which helps to get a full or incremental list of files/dirs from a GlusterFS volume using Changelog or find. In Gluster volumes, detecting modified files is challenging: a readdir on a directory leads to multiple network calls, since the files in a directory are distributed across nodes.

+

This tool should be run on one of the nodes; it gets the volume info and the list of nodes and brick paths. For each brick, it spawns a process and runs the crawler command on the respective node. The crawler runs on the brick filesystem (xfs, ext4, etc.) and not on the Gluster mount. The crawler generates an output file with the list of files modified after the last run or after the session creation.

+

Session Management

+

Create a glusterfind session to remember the time when the last sync or processing completed. For example, your backup application runs every day and gets incremental results on each run. The tool maintains sessions in $GLUSTERD_WORKDIR/glusterfind/; for each session it creates a directory and, inside it, a sub-directory with the volume name. The default working directory is /var/lib/glusterd; on some systems this location may differ. To find the working directory location, run

+
grep working-directory /etc/glusterfs/glusterd.vol
+
+

or

+
grep working-directory /usr/local/etc/glusterfs/glusterd.vol
+
+

if you installed from the source.

+

For example, if the session name is "backup" and the volume name is "datavol", then the tool creates $GLUSTERD_WORKDIR/glusterfind/backup/datavol. From now on we refer to this directory as $SESSION_DIR.

+
create => pre => post => [delete]
+
+

Once the session is created, we can run the tool in two steps, Pre and Post. To collect the list of files modified after the create time or the last run time, we call the pre command. The pre command finds the modified files and generates the output file. The consumer can check the exit code of the pre command and start processing those files. As a post-processing step, run the post command to update the session time to the latest run.

+

For example, a backup utility runs the Pre command and gets the list of files/directories changed, syncs those files to the backup target, and then informs glusterfind by calling the Post command.
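As an illustrative sketch of that flow (the session name, volume name, mount point and sync destination below are assumptions used only for the example):

# Hypothetical nightly backup built on the pre/post flow described above
SESSION=backup
VOLUME=datavol
OUTFILE=/root/backup.txt

glusterfind pre "$SESSION" "$VOLUME" "$OUTFILE"      # collect the changed files
rsync -a --files-from="$OUTFILE" /mnt/datavol/ backuphost:/backups/datavol/   # sync them (example consumer)
glusterfind post "$SESSION" "$VOLUME"                # record the new session time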

+

At the end of the Pre command, the status file $SESSION_DIR/status.pre is created. The pre status file stores the time when the current crawl started; all files/dirs modified up to that time are collected. Once Post is called, $SESSION_DIR/status.pre is renamed to $SESSION_DIR/status, and the content of this file is used as the start time for the next crawl.

+

During Pre, we can force the tool to do a full find instead of an incremental find. The tool uses the find command on the brick backend to get the list of files/dirs.

+

When glusterfind create is run, that node generates an ssh key ($GLUSTERD_WORKDIR/glusterfind.secret.pem) and distributes it to all peers via Glusterd. Once the ssh key is distributed in the trusted pool, the tool can run ssh commands on, and copy files from, the other volume nodes.

+

When glusterfind pre is run, it internally runs gluster volume info to get the list of nodes and the respective brick paths. For each brick, it calls the respective node agent via ssh to find the modified files/dirs which are local to that node. Once each node agent generates its output file, glusterfind collects all the files via scp and merges them into the given output file.

+

When glusterfind post is run, it renames $SESSION_DIR/status.pre file to $SESSION_DIR/status.

+

Changelog Mode and GFID to Path conversion

+

Incremental find uses Changelogs to get the list of GFIDs modified/created. Applications expect file paths instead of GFIDs, and there is no standard/easy way to convert from GFID to path.

+

If we set the build-pgfid option on a volume, GlusterFS starts recording each file's parent directory GFID as an xattr on the file on any ENTRY fop.

+
trusted.pgfid.<GFID>=NUM_LINKS
+
+
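For example, assuming a volume named datavol, the option can be enabled with:

gluster volume set datavol build-pgfid on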

To convert from GFID to path, we can mount the volume with the aux-gfid-mount option and get the path information with a getfattr query.

+
getfattr -n glusterfs.ancestry.path -e text /mnt/datavol/.gfid/<GFID>
+
+

This approach is slow: for each requested file it gets the parent GFID via the xattr and reads that directory to find the file having the same inode number as the GFID file. To improve performance, glusterfind uses the build-pgfid option, but instead of using getfattr on the mount it gets the details from the brick backend. glusterfind collects all parent GFIDs at once and starts crawling each directory. Instead of processing one GFID-to-path conversion at a time, it gets the inode numbers of all input GFIDs and filters while reading the parent directory.

+

The above method is fast compared to find -samefile, since it crawls only the required directories to find files with the same inode number as the GFID file. But pgfid information is only available once a lookup or any other ENTRY fop has been made on a file after enabling build-pgfid. Files created before build-pgfid was enabled will not get converted from GFID to path with this approach.

+

The tool collects the list of GFIDs that failed to convert with the above method and does a full crawl to convert them to paths. The find command is used to crawl the entire namespace. Instead of calling find for every GFID, glusterfind uses an efficient way to convert all GFIDs to paths with a single call to find.

+

Usage

+

Create the session

+
glusterfind create SESSION_NAME VOLNAME [--force]
+glusterfind create --help
+
+

Where SESSION_NAME is any name, without spaces, used to identify the session on subsequent runs. When a node is added to the volume, the tool expects the ssh keys to be copied to the new node(s) as well; run the create command with --force to distribute the keys again.

+

Examples,

+
# glusterfind create --help
+# glusterfind create backup datavol
+# glusterfind create antivirus_scanner datavol
+# glusterfind create backup datavol --force
+
+

Pre Command

+
glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE
+glusterfind pre --help
+
+

We need not specify the volume name separately since the session already has the details. The list of files will be populated in OUTFILE.

+

To trigger a full find, call the pre command with the --full argument. Multiple crawlers are available for incremental find; we can choose the crawl type with the --crawl argument.

+

Examples,

+
# glusterfind pre backup datavol /root/backup.txt
+# glusterfind pre backup datavol /root/backup.txt --full
+
+# # Changelog based crawler, works only for incremental
+# glusterfind pre backup datavol /root/backup.txt --crawler=changelog
+
+# # Find based crawler, works for both full and incremental
+# glusterfind pre backup datavol /root/backup.txt --crawler=brickfind
+
+

The output file contains the list of files/dirs relative to the volume mount. If we need to prefix a path to get absolute paths, then:

+
glusterfind pre backup datavol /root/backup.txt --file-prefix=/mnt/datavol/
+
+

List Command

+

To get the list of sessions and respective session time,

+
glusterfind list [--session SESSION_NAME] [--volume VOLUME_NAME]
+
+

Examples,

+
# glusterfind list
+# glusterfind list --session backup
+
+

Example output,

+
SESSION                   VOLUME                    SESSION TIME
+---------------------------------------------------------------------------
+backup                    datavol                   2015-03-04 17:35:34
+
+

Post Command

+
glusterfind post SESSION_NAME VOLUME_NAME
+
+

Examples,

+
glusterfind post backup datavol
+
+

Delete Command

+
glusterfind delete SESSION_NAME VOLUME_NAME
+
+

Examples,

+
glusterfind delete backup datavol
+
+

Adding more Crawlers

+

Adding more crawlers is very simple: add an entry in $GLUSTERD_WORKDIR/glusterfind.conf. glusterfind can then choose your crawler using the --crawl argument.

+
[crawlers]
+changelog=/usr/libexec/glusterfs/glusterfind/changelog.py
+brickfind=/usr/libexec/glusterfs/glusterfind/brickfind.py
+
+

For example, if you have a multithreaded brick crawler, say parallelbrickcrawl, add it to the conf file.

+
[crawlers]
+changelog=/usr/libexec/glusterfs/glusterfind/changelog.py
+brickfind=/usr/libexec/glusterfs/glusterfind/brickfind.py
+parallelbrickcrawl=/root/parallelbrickcrawl
+
+

A custom crawler can be an executable script/binary which accepts the volume name, brick path, output file and start time (and an optional debug flag).

+

For example,

+
/root/parallelbrickcrawl SESSION_NAME VOLUME BRICK_PATH OUTFILE START_TIME [--debug]
+
+

Where START_TIME is in unix epoch format; START_TIME will be zero for a full find.

+

Known Issues

+
  1. Deleted files will not get listed, since we can't convert GFID to path if the file/dir is deleted.
  2. Only the new name will get listed if a file is renamed.
  3. All hardlinks will get listed.
diff --git a/GlusterFS-Tools/index.html b/GlusterFS-Tools/index.html
new file mode 100644
index 00000000..ed63f961
--- /dev/null
+++ b/GlusterFS-Tools/index.html
(GlusterFS Tools List - Gluster Docs)

diff --git a/Install-Guide/Common-criteria/index.html b/Install-Guide/Common-criteria/index.html
new file mode 100644
index 00000000..10b3e1be
--- /dev/null
+++ b/Install-Guide/Common-criteria/index.html

Common Criteria

+ +

Getting Started

+

This tutorial will cover different options for getting a Gluster +cluster up and running. Here is a rundown of the steps we need to do.

+

To start, we will go over some common things you will need to know for +setting up Gluster.

+

Next, choose the method you want to use to set up your first cluster:

+
    +
  • Within a virtual machine
  • +
  • To bare metal servers
  • +
  • To EC2 instances in Amazon
  • +
+

Finally, we will install Gluster, create a few volumes, and test using +them.

+

General Setup Principles

+

No matter where you will be installing Gluster, it helps to understand a +few key concepts on what the moving parts are.

+

First, it is important to understand that GlusterFS isn’t really a +filesystem in and of itself. It concatenates existing filesystems into +one (or more) big chunks so that data being written into or read out of +Gluster gets distributed across multiple hosts simultaneously. This +means that you can use space from any host that you have available. +Typically, XFS is recommended but it can be used with other filesystems +as well. Most commonly EXT4 is used when XFS isn’t, but you can (and +many, many people do) use another filesystem that suits you.

+

Now that we understand that, we can define a few of the common terms used in +Gluster.

+
    +
  • A trusted pool refers collectively to the hosts in a given + Gluster Cluster.
  • +
  • A node or “server” refers to any server that is part of a + trusted pool. In general, this assumes all nodes are in the same + trusted pool.
  • +
  • A brick is used to refer to any device (really this means + filesystem) that is being used for Gluster storage.
  • +
  • An export refers to the mount path of the brick(s) on a given + server, for example, /export/brick1
  • +
  • The term Global Namespace is a fancy way of saying a Gluster + volume
  • +
  • A Gluster volume is a collection of one or more bricks (of + course, typically this is two or more). This is analogous to + /etc/exports entries for NFS.
  • +
  • GNFS and kNFS. GNFS is how we refer to our inline NFS + server. kNFS stands for kernel NFS, or, as most people would say, + just plain NFS. Most often, you will want kNFS services disabled on + the Gluster nodes. Gluster NFS doesn't take any additional + configuration and works just like you would expect with NFSv3. It is + possible to configure Gluster and NFS to live in harmony if you want + to.
  • +
+

Other notes:

+
    +
  • For this test, if you do not have DNS set up, you can get away with
    using /etc/hosts entries for the two nodes (see the example after this
    list). However, when you move from this basic setup to using Gluster in
    production, correct DNS entries (forward and reverse) and NTP are essential.
  • +
  • When you install the Operating System, do not format the Gluster + storage disks! We will use specific settings with the mkfs command + later on when we set up Gluster. If you are testing with a single + disk (not recommended), make sure to carve out a free partition or + two to be used by Gluster later, so that you can format or reformat + at will during your testing.
  • +
  • Firewalls are great, except when they aren’t. For storage servers, + being able to operate in a trusted environment without firewalls can + mean huge gains in performance, and is recommended. In case you absolutely + need to set up a firewall, have a look at + Setting up clients for + information on the ports used.
  • +
+
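For example, hypothetical /etc/hosts entries for a DNS-less test (the addresses and names below are placeholders; add the equivalent on both nodes):

echo "192.168.1.101  node01.mydomain.net  node01" >> /etc/hosts
echo "192.168.1.102  node02.mydomain.net  node02" >> /etc/hosts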

Click here to get started

diff --git a/Install-Guide/Community-Packages/index.html b/Install-Guide/Community-Packages/index.html
new file mode 100644
index 00000000..ea02edd6
--- /dev/null
+++ b/Install-Guide/Community-Packages/index.html

Community Packages

+ +

Community Packages

+

GlusterFS

+

Tentative plans for community convenience packages.

+

A yes means packages are (or will be) provided in the respective repository.
+A no means no plans to build new updates. Existing packages will remain in the repos.
+The following GlusterFS versions have reached EOL[1]: 8, 7, 6 and earlier.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Repository | Distribution | 11 | 10 |
|---|---|---|---|
| CentOS Storage SIG[2] | Stream 8 | yes | yes |
| | Stream 9 | yes | yes |
| Fedora[3] | F37 | yes | yes¹ |
| | F38 | yes¹ | yes |
| | F39(rawhide) | yes¹ | yes |
| Debian[3] | Buster/10 | yes | yes |
| | Bullseye/11 | yes | yes |
| | Bookworm/12 | yes | yes |
| | Trixie/13(sid) | yes | no |
| Ubuntu Launchpad[4] | Bionic/18.04 | yes | yes |
| | Focal/20.04 | yes | yes |
| | Jammy/22.04 | yes | yes |
| | Kinetic/22.10 | yes | yes |
| | Lunar/23.04 | yes | yes |
| | Mantic/23.10 | yes | no |
| OpenSUSE Build Service[5] | Leap15.4 | yes | yes |
| | SLES15SP4 | yes | yes |
| | Tumbleweed | yes | yes |
+

NOTE: We have not been building Debian arm packages for a while due to resource constraints; only amd64 packages are present on download.gluster.org.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Repository | Distribution | glusterfs-selinux | gdeploy | gluster-block | glusterfs-coreutils | nfs-ganesha | Samba |
|---|---|---|---|---|---|---|---|
| CentOS Storage SIG[2] | Stream 8 | yes | tbd | yes | yes | yes | yes |
| | Stream 9 | yes | tbd | yes | yes | yes | yes |
| Fedora[3] | F37 | yes | yes | yes | yes | yes | ? |
| | F38 | yes | yes | yes | yes | yes | ? |
| | F39(rawhide) | yes | yes | yes | yes | yes | ? |
| Debian[3] | Buster/10 | n/a | no | no | yes | yes | ? |
| | Bullseye/11 | n/a | no | no | yes | yes | ? |
| | Bookworm/12 | n/a | no | no | yes | yes | ? |
| | Trixie/13(sid) | n/a | no | no | yes | yes | ? |
| Ubuntu Launchpad[4] | Bionic/18.04 | n/a | no | no | yes | yes | ? |
| | Focal/20.04 | n/a | no | no | yes | yes | ? |
| | Jammy/22.04 | n/a | no | no | yes | yes | ? |
| | Kinetic/22.10 | n/a | no | no | yes | yes | ? |
| | Lunar/23.04 | n/a | no | no | yes | yes | ? |
| | Mantic/23.10 | n/a | no | no | yes | yes | ? |
| OpenSUSE Build Service[5] | Leap15.4 | n/a | yes | yes | yes | yes | ? |
| | SLES15SP4 | n/a | yes | yes | yes | yes | ? |
| | Tumbleweed | n/a | yes | yes | yes | yes | ? |
+

[1] https://www.gluster.org/release-schedule/
+[2] https://wiki.centos.org/SpecialInterestGroup/Storage
+[3] https://download.gluster.org/pub/gluster/glusterfs
+[4] https://launchpad.net/~gluster
+[5] http://download.opensuse.org/repositories/home:/glusterfs:/

+

¹ Fedora Updates, UpdatesTesting, or Rawhide repository. Use dnf to install.

diff --git a/Install-Guide/Configure/index.html b/Install-Guide/Configure/index.html
new file mode 100644
index 00000000..2f350f48
--- /dev/null
+++ b/Install-Guide/Configure/index.html

Configure

+ +

Configure Firewall

+

For the Gluster nodes to communicate within a cluster, either the firewalls have to be turned off or communication has to be enabled for each server.

+
iptables -I INPUT -p all -s `<ip-address>` -j ACCEPT
+
+

Configure the trusted pool

+

Remember that the trusted pool is the term used to define a cluster of nodes in Gluster. Choose a server to be your “primary” server. This is just to keep things simple; you will generally want to run all the commands in this tutorial from that one server. Keep in mind that running many Gluster-specific commands (like gluster volume create) on one server in the cluster will execute the same command on all other servers.

+

Replace nodename with the hostname of the other server in the cluster, or its IP address if you don’t have DNS or /etc/hosts entries. Let's say we want to connect to node02:

+
gluster peer probe node02
+
+

Notice that running gluster peer status from the second node shows +that the first node has already been added.
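For example, on node02 (hostnames and UUIDs will of course differ in your setup):

# Run on node02; the output should list node01 as a peer in the cluster
gluster peer status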

+

Partition the disk

+

Assuming you have an empty disk at /dev/sdb: (You can check the partitions on your system using fdisk -l)

+
fdisk /dev/sdb
+
+

And then create a single XFS partition using fdisk
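The exact fdisk dialogue varies between versions; as a rough sketch, one interactive sequence that creates a single partition spanning the disk looks like this (the keystrokes are illustrative, not a script):

fdisk /dev/sdb
# n        -> new partition
# p        -> primary
# 1        -> partition number
# <Enter>  -> accept the default first sector
# <Enter>  -> accept the default last sector
# w        -> write the partition table and exit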

+

Format the partition

+
mkfs.xfs -i size=512 /dev/sdb1
+
+

Add an entry to /etc/fstab

+
echo "/dev/sdb1 /export/sdb1 xfs defaults 0 0"  >> /etc/fstab
+
+

Mount the partition as a Gluster "brick"

+
mkdir -p /export/sdb1 && mount -a
+
+

Set up a Gluster volume

+

The most basic Gluster volume type is a “Distribute only” volume (also +referred to as a “pure DHT” volume if you want to impress the folks at +the water cooler). This type of volume simply distributes the data +evenly across the available bricks in a volume. So, if I write 100 +files, on average, fifty will end up on one server, and fifty will end +up on another. This is faster than a “replicated” volume, but isn’t as +popular since it doesn’t give you two of the most sought after features +of Gluster — multiple copies of the data, and automatic failover if +something goes wrong.

+

To set up a replicated volume:

+
gluster volume create gv0 replica 3 node01.mydomain.net:/export/sdb1/brick \
+  node02.mydomain.net:/export/sdb1/brick                                   \
+  node03.mydomain.net:/export/sdb1/brick
+
+

Breaking this down into pieces:

+
    +
  • the first part says to create a gluster volume named gv0 + (the name is arbitrary, gv0 was chosen simply because + it’s less typing than gluster_volume_0).
  • +
  • make the volume a replica volume
  • +
  • keep a copy of the data on at least 3 bricks at any given time. + Since we only have three bricks total, this + means each server will house a copy of the data.
  • +
  • we specify which nodes to use, and which bricks on those nodes. The order here is + important when you have more bricks.
  • +
  • the brick directory will be created by this command. If the directory already + exists, you may get <brick> is already part of a volume errors.
  • +
+

It is possible (as of the most current release at the time of this writing, Gluster 3.3) to specify the bricks in such a way that both copies of the data reside on a single node. This would make for an embarrassing explanation to your boss when your bulletproof, completely redundant, always-on super cluster comes to a grinding halt because a single point of failure occurs.

+

Now, we can check to make sure things are working as expected:

+
gluster volume info
+
+

And you should see results similar to the following:

+
Volume Name: gv0
+Type: Replicate
+Volume ID: 8bc3e96b-a1b6-457d-8f7a-a91d1d4dc019
+Status: Created
+Number of Bricks: 1 x 3 = 3
+Transport-type: tcp
+Bricks:
+Brick1: node01.yourdomain.net:/export/sdb1/brick
+Brick2: node02.yourdomain.net:/export/sdb1/brick
+Brick3: node03.yourdomain.net:/export/sdb1/brick
+
+

This shows us essentially what we just specified during the volume creation. The one key output worth noticing is Status. A status of Created means that the volume has been created but hasn’t yet been started, which would cause any attempt to mount the volume to fail.

+

Now, we should start the volume before we try to mount it.

+
gluster volume start gv0
+
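Once the volume is started, it can be mounted from a client. A minimal sketch, assuming the native FUSE client is installed and node01 is reachable from the client (the mount point /mnt/gv0 is arbitrary):

mkdir -p /mnt/gv0
mount -t glusterfs node01.mydomain.net:/gv0 /mnt/gv0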
diff --git a/Install-Guide/Install/index.html b/Install-Guide/Install/index.html
new file mode 100644
index 00000000..0b424024
--- /dev/null
+++ b/Install-Guide/Install/index.html

Install

+ +

Installing Gluster

+

For RPM based distributions, if you will be using InfiniBand, add the glusterfs RDMA package to the installation. For RPM based systems, yum/dnf is used as the install method in order to satisfy external dependencies such as compat-readline5.

+
Community Packages
+

Packages are provided according to this table.

+
For Debian
+

Download the GPG key to apt config directory:

+
wget -O - https://download.gluster.org/pub/gluster/glusterfs/9/rsa.pub | gpg --dearmor > /etc/apt/trusted.gpg.d/gluster.gpg
+
+

If the rsa.pub is not available at the above location, please look here https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub and add the GPG key to apt:

+
wget -O - https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | gpg --dearmor > /etc/apt/trusted.gpg.d/gluster.gpg
+
+

Add the source:

+
DEBID=$(grep 'VERSION_ID=' /etc/os-release | cut -d '=' -f 2 | tr -d '"')
+DEBVER=$(grep 'VERSION=' /etc/os-release | grep -Eo '[a-z]+')
+DEBARCH=$(dpkg --print-architecture)
+echo "deb [signed-by=/etc/apt/trusted.gpg.d/gluster.gpg] https://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/${DEBID}/${DEBARCH}/apt ${DEBVER} main" > /etc/apt/sources.list.d/gluster.list
+
+

Update package list:

+
apt update
+
+

Install:

+
apt install glusterfs-server
+
+
For Ubuntu
+

Install software-properties-common:

+
apt install software-properties-common
+
+

Then add the community GlusterFS PPA:

+
add-apt-repository ppa:gluster/glusterfs-7
+apt update
+
+

Finally, install the packages:

+
apt install glusterfs-server
+
+

Note: Packages exist for Ubuntu 16.04 LTS, 18.04 +LTS, 20.04 LTS, 20.10, 21.04

+
For Red Hat/CentOS
+

RPMs for CentOS and other RHEL clones are available from the +CentOS Storage SIG mirrors.

+

For more installation details refer Gluster Quick start guide from CentOS Storage SIG.

+
For Fedora
+

Install the Gluster packages:

+
dnf install glusterfs-server
+
+

Once you are finished installing, you can move on to the configuration section.

+
For Arch Linux
+

Install the Gluster package:

+
pacman -S glusterfs
+
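After installation on systemd-based distributions, the Gluster management daemon generally has to be enabled and started before moving on to configuration (the service name is typically glusterd):

systemctl enable --now glusterd
systemctl status glusterd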
diff --git a/Install-Guide/Overview/index.html b/Install-Guide/Overview/index.html
new file mode 100644
index 00000000..f8907f1b
--- /dev/null
+++ b/Install-Guide/Overview/index.html

Overview

+

Purpose

+

The Install Guide (IG) is aimed at providing the sequence of steps needed for +setting up Gluster. It contains a reasonable degree of detail which helps an +administrator to understand the terminology, the choices and how to configure +the deployment to the storage needs of their application workload. The Quick +Start Guide (QSG) is designed to get a +deployment with default choices and is aimed at those who want to spend less +time to get to a deployment.

+

After you deploy Gluster by following these steps, we recommend that you read +the Gluster Admin Guide to learn how to +administer Gluster and how to select a volume type that fits your needs. Also, +be sure to enlist the help of the Gluster community via the IRC or, Slack +channels (see https://www.gluster.org/community/) or Q&A section.

+

Overview

+

Before we begin, let’s talk about what Gluster is, address a few myths +and misconceptions, and define a few terms. This will help you to avoid +some of the common issues that others encounter as they start their journey with Gluster.

+

What is Gluster

+

Gluster is a distributed scale-out filesystem that allows rapid +provisioning of additional storage based on your storage consumption +needs. It incorporates automatic failover as a primary feature. All of +this is accomplished without a centralized metadata server.

+

What is Gluster without making me learn an extra glossary of terminology?

+
    +
  • Gluster is an easy way to provision your own storage backend NAS + using almost any hardware you choose.
  • +
  • You can add as much as you want to start with, and if you need more + later, adding more takes just a few steps.
  • +
  • You can configure failover automatically, so that if a server goes + down, you don’t lose access to the data. No manual steps are + required for failover. When you fix the server that failed and bring + it back online, you don’t have to do anything to get the data back + except wait. In the meantime, the most current copy of your data + keeps getting served from the node that was still running.
  • +
  • You can build a clustered filesystem in a matter of minutes… it is + trivially easy for basic setups
  • +
  • It takes advantage of what we refer to as “commodity hardware”, + which means, we run on just about any hardware you can think of, + from that stack of decomm’s and gigabit switches in the corner no + one can figure out what to do with (how many license servers do you + really need, after all?), to that dream array you were speccing out + online. Don’t worry, I won’t tell your boss.
  • +
  • It takes advantage of commodity software too. No need to mess with + kernels or fine tune the OS to a tee. We run on top of most unix + filesystems, with XFS and ext4 being the most popular choices. We do + have some recommendations for more heavily utilized arrays, but + these are simple to implement and you probably have some of these + configured already anyway.
  • +
  • Gluster data can be accessed from just about anywhere – You can use + traditional NFS, SMB/CIFS for Windows clients, or our own native + GlusterFS (a few additional packages are needed on the client + machines for this, but as you will see, they are quite small).
  • +
  • There are even more advanced features than this, but for now we will + focus on the basics.
  • +
  • It’s not just a toy. Gluster is enterprise-ready, and commercial + support is available if you need it. It is used in some of the most + taxing environments like media serving, natural resource + exploration, medical imaging, and even as a filesystem for Big Data.
  • +
+

Is Gluster going to work for me and what I need it to do?

+

Most likely, yes. People use Gluster for storage needs of a variety of application workloads. You are +encouraged to ask around in our IRC or, Slack channels or Q&A forums to see if +anyone has tried something similar. That being said, there are a few +places where Gluster is going to need more consideration than others.

+
    +
  • Accessing Gluster from SMB/CIFS is often going to be slow by most
    people’s standards. If you only have moderate access by users, then it most
    likely won’t be an issue for you. On the other hand, with enough
    Gluster servers added into the mix, some people have seen better performance
    with us than with other solutions, due to the scale-out nature of the
    technology.
  • +
  • Gluster is traditionally better when using file sizes of at least 16KB + (with a sweet spot around 128KB or so).
  • +
+

What is the cost and complexity required to set up a cluster?

+

Question: How many billions of dollars is it going to cost to setup a cluster? +Don’t I need redundant networking, super fast SSD’s, +technology from Alpha Centauri delivered by men in black, etc…?

+

I have never seen anyone spend even close to a billion, unless they got +the rust proof coating on the servers. You don’t seem like the type that +would get bamboozled like that, so have no fear. For the purpose of this +tutorial, if your laptop can run two VM’s with 1GB of memory each, you +can get started testing and the only thing you are going to pay for is +coffee (assuming the coffee shop doesn’t make you pay them back for the +electricity to power your laptop).

+

If you want to test on bare metal, since Gluster is built with commodity +hardware in mind, and because there is no centralized meta-data server, +a very simple cluster can be deployed with two basic servers (2 CPU’s, +4GB of RAM each, 1 Gigabit network). This is sufficient to have a nice +file share or a place to put some nightly backups. Gluster is deployed +successfully on all kinds of disks, from the lowliest 5200 RPM SATA to +mightiest 1.21 gigawatt SSD’s. The more performance you need, the more +consideration you will want to put into how much hardware to buy, but +the great thing about Gluster is that you can start small, and add on as +your needs grow.

+

OK, but if I add servers on later, don’t they have to be exactly the same?

+

In a perfect world, sure. Having the hardware be the same means less +troubleshooting when the fires start popping up. But plenty of people +deploy Gluster on mix and match hardware, and successfully.

+

Get started by checking some Common Criteria

diff --git a/Install-Guide/Setup-Bare-metal/index.html b/Install-Guide/Setup-Bare-metal/index.html
new file mode 100644
index 00000000..a67988e8
--- /dev/null
+++ b/Install-Guide/Setup-Bare-metal/index.html

Setup Bare Metal

+

Note: You only need one of the three setup methods!

+

Setup, Method 2 – Setting up on physical servers

+

To set up Gluster on physical servers, we recommend two servers of very modest specifications (2 CPUs, 2GB of RAM, 1GBE). Since we are dealing with physical hardware here, keep in mind that what we are showing here is for testing purposes. In the end, remember that forces beyond your control (aka, your bosses’ boss...) can force you to take that “just for a quick test” environment right into production, despite your kicking and screaming against it. To prevent this, it can be a good idea to deploy your test environment as much as possible the same way you would a production environment (in case it becomes one, as mentioned above). That being said, here is a reminder of some of the best practices we mentioned before:

+
    +
  • Make sure DNS and NTP are setup, correct, and working
  • +
  • If you have access to a backend storage network, use it! 10GBE or + InfiniBand are great if you have access to them, but even a 1GBE + backbone can help you get the most out of your deployment. Make sure + that the interfaces you are going to use are also in DNS since we + will be using the hostnames when we deploy Gluster
  • +
  • When it comes to disks, the more the merrier. Although you could + technically fake things out with a single disk, there would be + performance issues as soon as you tried to do any real work on the + servers
  • +
+

With the explosion of commodity hardware, you don’t need to be a hardware expert these days to deploy a server. Although this is generally a good thing, it also means that attention to some important, performance-impacting BIOS settings is commonly missed. Several points might cause issues if you're unaware of them:

+
    +
  • Most manufacturers enable power saving mode by default. This is a + great idea for servers that do not have high-performance + requirements. For the average storage server, the performance-impact + of the power savings is not a reasonable tradeoff
  • +
  • Newer motherboards and processors have lots of nifty features!
    Enhancements in virtualization, newer ways of doing predictive
    algorithms and NUMA are just a few to mention. To be safe, many
    manufacturers ship hardware with settings meant to work with as
    massive a variety of workloads and configurations as they have
    customers. One issue you could face: that blazing-fast 10GBE card you
    were so thrilled about installing? In many cases, it ends up being
    crippled by a default 1x speed put in place on the PCI-E bus by the
    motherboard.
  • +
+

Thankfully, most manufacturers show all the BIOS settings, including the +defaults, right in the manual. It only takes a few minutes to download, +and you don’t even have to power off the server unless you need to make +changes. More and more boards include the functionality to make changes +in the BIOS on the fly without even powering the box off. One word of +caution of course, is don’t go too crazy. Fretting over each tiny little +detail and setting is usually not worth the time, and the more changes +you make, the more you need to document and implement later. Try to find +the happy balance between time spent managing the hardware (which +ideally should be as close to zero after you setup initially) and the +expected gains you get back from it.

+

Finally, remember that some hardware really is better than others. +Without pointing fingers anywhere specifically, it is often true that +onboard components are not as robust as add-ons. As a general rule, you +can safely delegate the onboard hardware to things like management +network for the NIC’s, and for installing the OS onto a SATA drive. At +least twice a year you should check the manufacturer's website for +bulletins about your hardware. Critical performance issues are often +resolved with a simple driver or firmware update. As often as not, these +updates affect the two most critical pieces of hardware on a machine you +want to use for networked storage - the RAID controller and the NIC's.

+

Once you have set up the servers and installed the OS, you are ready to +move on to the install section.

diff --git a/Install-Guide/Setup-aws/index.html b/Install-Guide/Setup-aws/index.html
new file mode 100644
index 00000000..f60eca1c
--- /dev/null
+++ b/Install-Guide/Setup-aws/index.html

Setup AWS

+

Note: You only need one of the three setup methods!

+

Setup, Method 3 – Deploying in AWS

+

Deploying in Amazon can be one of the fastest ways to get up and running +with Gluster. Of course, most of what we cover here will work with other +cloud platforms.

+
    +
  • Deploy at least two instances. For testing, you can use micro + instances (I even go as far as using spot instances in most cases). + Debates rage on what size instance to use in production, and there + is really no correct answer. As with most things, the real answer is + “whatever works for you”, where the trade-offs between cost and + performance are balanced in a continual dance of trying to make your + project successful while making sure there is enough money left over + in the budget for you to get that sweet new ping pong table in the + break room.
  • +
  • For cloud platforms, your data is wide open right from the start. As + such, you shouldn’t allow open access to all ports in your security + groups if you plan to put a single piece of even the least valuable + information on the test instances. By least valuable, I mean “Cash + value of this coupon is 1/100th of 1 cent” kind of least valuable. + Don’t be the next one to end up as a breaking news flash on the + latest inconsiderate company to allow their data to fall into the + hands of the baddies. See Step 2 for the minimum ports you will need + open to use Gluster
  • +
  • You can use the free “ephemeral” storage for the Gluster bricks + during testing, but make sure to use some form of protection against + data loss when you move to production. Typically this means EBS + backed volumes or using S3 to periodically back up your data bricks.
  • +
+

Other notes:

+
    +
  • In production, it is recommended to replicate your VM’s across + multiple zones. For purpose of this tutorial, it is overkill, but if + anyone is interested in this please let us know since we are always + looking to write articles on the most requested features and + questions.
  • +
  • Using EBS volumes and Elastic IPs are also recommended in + production. For testing, you can safely ignore these as long as you + are aware that the data could be lost at any moment, so make sure + your test deployment is just that, testing only.
  • +
  • Performance can fluctuate wildly in a cloud environment. If + performance issues are seen, there are several possible strategies, + but keep in mind that this is the perfect place to take advantage of + the scale-out capability of Gluster. While it is not true in all + cases that deploying more instances will necessarily result in a + “faster” cluster, in general, you will see that adding more nodes + means more performance for the cluster overall.
  • +
  • If a node reboots, you will typically need to do some extra work to + get Gluster running again using the default EC2 configuration. If a + node is shut down, it can mean absolute loss of the node (depending + on how you set things up). This is well beyond the scope of this + document but is discussed in any number of AWS-related forums and + posts. Since I found out the hard way myself (oh, so you read the + manual every time?!), I thought it worth at least mentioning.
  • +
+

Once you have both instances up, you can proceed to the install page.

diff --git a/Install-Guide/Setup-virt/index.html b/Install-Guide/Setup-virt/index.html
new file mode 100644
index 00000000..67067e4b
--- /dev/null
+++ b/Install-Guide/Setup-virt/index.html

Setup on Virtual Machine

+

Note: You only need one of the three setup methods!

+

Setup, Method 1 – Setting up in virtual machines

+

As we just mentioned, to set up Gluster using virtual machines, you will need at least two virtual machines with at least 1GB of RAM each. You may be able to test with less, but most users will find it too slow for their tastes. The particular virtualization product you use is a matter of choice. Common platforms include Xen, VMware ESX and Workstation, VirtualBox, and KVM. For the purpose of this article, all steps assume KVM, but the concepts are expected to be simple to translate to other platforms as well. The article assumes you know the particulars of how to create a virtual machine and have already installed a 64-bit Linux distribution.

+

Create or clone two VM’s, with the following setup on each:

+
    +
  • 2 disks using the VirtIO driver, one for the base OS and one that we + will use as a Gluster “brick”. You can add more later to try testing + some more advanced configurations, but for now let’s keep it simple.
  • +
+

Note: If you have ample space available, consider allocating all the +disk space at once.

+
    +
  • 2 NIC’s using VirtIO driver. The second NIC is not strictly + required, but can be used to demonstrate setting up a separate + network for storage and management traffic.
  • +
+

Note: Attach each NIC to a separate network.

+

Other notes: If you clone the VM, make sure that Gluster has not already been installed. Gluster generates a UUID to “fingerprint” each system, so cloning a previously deployed system will result in errors later on.

+

Once these are prepared, you are ready to move on to the +install section.

diff --git a/Ops-Guide/Overview/index.html b/Ops-Guide/Overview/index.html
new file mode 100644
index 00000000..4bf08f8c
--- /dev/null
+++ b/Ops-Guide/Overview/index.html

Overview

+

Over the years the infrastructure and services consumed by the Gluster.org +community have grown organically. There have been instances of design and +planning but the growth has mostly been ad-hoc and need-based.

+

Central to the plan of revitalizing the Gluster.org community is the ability to +provide well-maintained infrastructure services with predictable uptimes and +resilience. We're migrating the existing services into the Community Cage. The +implied objective is that the transition would open up ways and means of the +formation of a loose coalition among Infrastructure Administrators who provide +expertise and guidance to the community projects within the OSAS team.

+

A small group of Gluster.org community members was asked to assess the current +utilization and propose a planned growth. The ad-hoc nature of the existing +infrastructure impedes the development of a proposal based on +standardized methods of extrapolation. A part of the projection is based on a +combination of patterns and heuristics - problems that have been observed and +how mitigation strategies have enabled the community to continue to consume the +services available.

+

The guiding principle for the assessment has been the need to migrate services +to "Software-as-a-Service" models and providers wherever applicable and deemed +fit. To illustrate this specific directive - the documentation/docs aspect of +Gluster.org has been continuously migrating artifacts to readthedocs.org while +focusing on simple integration with the website. The website itself has been +put within the Gluster.org Github.com account to enable ease of maintenance and +sustainability.

+

For more details look at the full Tools List.

diff --git a/Ops-Guide/Tools/index.html b/Ops-Guide/Tools/index.html
new file mode 100644
index 00000000..ef8a5157
--- /dev/null
+++ b/Ops-Guide/Tools/index.html

Tools

+ +

Tools We Use

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Service/Tool | Purpose | Hosted At |
|---|---|---|
| Github | Code Review | Github |
| Jenkins | CI, build-verification-test | Temporary Racks |
| Backups | Website, Gerrit and Jenkins backup | Rackspace |
| Docs | Documentation content | mkdocs.org |
| download.gluster.org | Official download site of the binaries | Rackspace |
| Mailman | Lists mailman | Rackspace |
| www.gluster.org | Web asset | Rackspace |
+

Notes

+
    +
  • download.gluster.org: Resiliency is important for availability and metrics.
    Since it is the official download site, access needs to be restricted as much
    as possible. A few developers building the community packages have access.
    Anyone who requires access can raise an issue at gluster/project-infrastructure
    with a valid reason.
  • +
  • Mailman: Should be migrated to a separate host. Should be made more redundant
    (i.e., more than 1 MX).
  • +
  • www.gluster.org: Framework and artifacts now exist under gluster.github.com.
    Has various legacy installations of software (mediawiki, etc.), which are being
    cleaned up as we find them.
  • +
diff --git a/Quick-Start-Guide/Architecture/index.html b/Quick-Start-Guide/Architecture/index.html
new file mode 100644
index 00000000..57bead22
--- /dev/null
+++ b/Quick-Start-Guide/Architecture/index.html

Architecture

+

architecture

+

A gluster volume is a collection of servers belonging to a Trusted Storage Pool. +A management daemon (glusterd) runs on each server and manages a brick process +(glusterfsd) which in turn exports the underlying on disk storage (XFS +filesystem). The client process mounts the volume and exposes the storage from +all the bricks as a single unified storage namespace to the applications +accessing it. The client and brick processes' stacks have various translators +loaded in them. I/O from the application is routed to different bricks via +these translators.

+

Types of Volumes

+

Gluster file system supports different +types of volumes based on the requirements. Some volumes are good for +scaling storage size, some for improving performance and some for both.

+

1.Distributed Glusterfs Volume - This is the type of volume which is created by default if no volume type is specified. +Here, files are distributed across various bricks in the volume. So file1 +may be stored only in brick1 or brick2 but not on both. Hence there is +no data redundancy. The purpose for such a storage volume is to easily & cheaply +scale the volume size. However this also means that a brick failure will +lead to complete loss of data and one must rely on the underlying +hardware for data loss protection.

+

distributed volume

+

Create a Distributed Volume

+
gluster volume create NEW-VOLNAME [transport [tcp | rdma | tcp,rdma]] NEW-BRICK...
+
+

For example to create a distributed volume with four storage servers +using TCP.

+
gluster volume create test-volume server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4
+
+
volume create: test-volume: success: please start the volume to access data
+
+

To display the volume info:

+
gluster volume info
+
+
Volume Name: test-volume
+Type: Distribute
+Status: Created
+Number of Bricks: 4
+Transport-type: tcp
+Bricks:
+Brick1: server1:/exp1
+Brick2: server2:/exp2
+Brick3: server3:/exp3
+Brick4: server4:/exp4
+
+

2.Replicated Glusterfs Volume - In this volume we overcome the +risk of data loss which is present in the distributed volume. Here exact copies of +the data are maintained on all bricks. The number of replicas in the +volume can be decided by client while creating the volume. So we need to +have at least two bricks to create a volume with 2 replicas or a minimum +of three bricks to create a volume of 3 replicas. One major advantage of +such a volume is that even if one brick fails the data can still be +accessed from its replicated bricks. Such a volume is used for better +reliability and data redundancy.

+

replicated volume

+

Create a Replicated Volume

+
gluster volume create NEW-VOLNAME [replica COUNT] [transport [tcp |rdma | tcp,rdma]] NEW-BRICK...
+
+

For example, to create a replicated volume with three storage servers:

+
gluster volume create test-volume replica 3 transport tcp \
+      server1:/exp1 server2:/exp2 server3:/exp3
+
+
volume create: test-volume: success: please start the volume to access data
+
+

3.Distributed Replicated Glusterfs Volume - In this volume files +are distributed across replicated sets of bricks. The number of bricks +must be a multiple of the replica count. Also the order in which we +specify the bricks is important since adjacent bricks become replicas of each +other. This type of volume is used when high availability of data due to +redundancy and scaling storage is required. So if there were eight +bricks and replica count 2 then the first two bricks become replicas of +each other then the next two and so on. This volume is denoted as 4x2. +Similarly if there were eight bricks and replica count 4 then four +bricks become replica of each other and we denote this volume as 2x4 +volume.

+

distributed_replicated_volume

+

Create the distributed replicated volume:

+
gluster volume create NEW-VOLNAME [replica COUNT] [transport [tcp | rdma | tcp,rdma]] NEW-BRICK...
+
+

For example, six node distributed replicated volume with a three-way +mirror:

+
gluster volume create test-volume replica 3 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6
+
+
volume create: test-volume: success: please start the volume to access data
+
+

4. Dispersed Glusterfs Volume - Dispersed volumes are based on erasure codes. They stripe the encoded data of files, with some redundancy added, across multiple bricks in the volume. You can use dispersed volumes to have a configurable level of reliability with minimum space waste. The number of redundant bricks in the volume can be decided by clients while creating the volume. The redundancy count determines how many bricks can be lost without interrupting the operation of the volume.

+

Dispersed volume +Create a dispersed volume:

+
gluster volume create test-volume [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>
+
+

For example, a three-node dispersed volume with a redundancy level of 1, (2 + 1):

+
gluster volume create test-volume disperse 3 redundancy 1 server1:/exp1 server2:/exp2 server3:/exp3
+
+
volume create: test-volume: success: please start the volume to access data
+
+

5. Distributed Dispersed Glusterfs Volume - Distributed dispersed volumes are equivalent to distributed replicated volumes, but use dispersed subvolumes instead of replicated ones. The number of bricks must be a multiple of the number of bricks in the first dispersed subvolume. The purpose of such a volume is to easily scale the volume size and distribute the load across various bricks.

+

distributed_dispersed_volume

Create a distributed dispersed volume:

+
gluster volume create [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>
+
+

For example, a six-node distributed dispersed volume with a redundancy level of 1, 2 x (2 + 1) = 6:

+
gluster volume create test-volume disperse 3 redundancy 1 server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6
+
+
volume create: test-volume: success: please start the volume to access data
+
+
+

Note:

+
  • A dispersed volume can be created by specifying the number of bricks in a disperse set, by specifying the number of redundancy bricks, or both.

  • If disperse is not specified, or the <COUNT> is missing, the entire volume will be treated as a single disperse set composed of all bricks enumerated in the command line.

  • If redundancy is not specified, it is computed automatically to be the optimal value. If this value does not exist, it is assumed to be '1' and a warning message is shown:

    # gluster volume create test-volume disperse 4 server{1..4}:/bricks/test-volume
    There isn't an optimal redundancy value for this configuration. Do you want to create the volume with redundancy 1 ? (y/n)

  • In all cases where redundancy is automatically computed and it's not equal to '1', a warning message is displayed:

    # gluster volume create test-volume disperse 6 server{1..6}:/bricks/test-volume
    The optimal redundancy for this configuration is 2. Do you want to create the volume with this value ? (y/n)

  • redundancy must be greater than 0, and the total number of bricks must be greater than 2 * redundancy. This means that a dispersed volume must have a minimum of 3 bricks.
+
+

FUSE

+

GlusterFS is a userspace filesystem. The GlusterFS developers opted for this approach in order to avoid the need to have modules in the Linux kernel.

+

As it is a userspace filesystem, GlusterFS makes use of FUSE (Filesystem in Userspace) to interact with the kernel VFS. For a long time, the implementation of a userspace filesystem was considered impossible. FUSE was developed as a solution for this. FUSE is a kernel module that supports interaction between the kernel VFS and non-privileged user applications, and it has an API that can be accessed from userspace. Using this API, any type of filesystem can be written using almost any language you prefer, as there are many bindings between FUSE and other languages.

+

fuse_structure

+

Structural diagram of FUSE.

+

This shows a filesystem "hello world" that is compiled to create a binary "hello". It is executed with a filesystem mount point /tmp/fuse. The user then issues a command, ls -l, on the mount point /tmp/fuse. This command reaches VFS via glibc, and since the mount /tmp/fuse corresponds to a FUSE-based filesystem, VFS passes it over to the FUSE module. The FUSE kernel module contacts the actual filesystem binary "hello" after passing through glibc and the FUSE library in userspace (libfuse). The result is returned by "hello" through the same path and reaches the ls -l command.

+

The communication between the FUSE kernel module and the FUSE library (libfuse) is via a special file descriptor which is obtained by opening /dev/fuse. This file can be opened multiple times, and the obtained file descriptor is passed to the mount syscall, to match up the descriptor with the mounted filesystem.
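As a small illustration (using the test-volume created earlier; the mount point is an arbitrary example), a GlusterFS client mount is just a FUSE mount, and the kernel side of the connection can be observed under sysfs:

mount -t glusterfs server1:/test-volume /mnt/glusterfs
# the mount is registered as a 'fuse.glusterfs' filesystem
mount | grep glusterfs
# each active FUSE mount has a connection directory under sysfs
ls /sys/fs/fuse/connections/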

+ +

Translators

+

Translating “translators”:

+
    +
  • A translator converts requests from users into requests for storage.
    • One to one, one to many, one to zero (e.g. caching)

+

translator

+
    +
  • A translator can modify requests on the way through:
    • convert one request type to another (during the request transfer amongst the translators)
    • modify paths, flags, even data (e.g. encryption)

+
    +
  • +

    Translators can intercept or block the requests. (e.g. access + control)

    +
  • +
  • +

    Or spawn new requests (e.g. pre-fetch)

    +
  • +
+

How Do Translators Work?

+
    +
  • Shared Objects
  • +
  • Dynamically loaded according to 'volfile'
  • +
+

    • dlopen/dlsym
    • set up pointers to parents / children
    • call init (constructor)
    • call IO functions through fops

+
    +
  • Conventions for validating/passing options, etc.
  • +
  • The configuration of translators (since GlusterFS 3.1) is managed + through the gluster command line interface (cli), so you don't need + to know in what order to graph the translators together.
  • +
+

Types of Translators

+

List of known translators with their current status.

Translator Type   Functional Purpose
Storage           Lowest level translator, stores and accesses data from the local file system.
Debug             Provides an interface and statistics for errors and debugging.
Cluster           Handles distribution and replication of data as it relates to writing to and reading from bricks and nodes.
Encryption        Extension translators for on-the-fly encryption/decryption of stored data.
Protocol          Extension translators for client/server communication protocols.
Performance       Tuning translators to adjust for workload and I/O profiles.
Bindings          Add extensibility, e.g. the Python interface written by Jeff Darcy to extend API interaction with GlusterFS.
System            System access translators, e.g. interfacing with file system access control.
Scheduler         I/O schedulers that determine how to distribute new write operations across clustered systems.
Features          Add additional features such as Quotas, Filters, Locks, etc.
+

The default / general hierarchy of translators in vol files :

+

translator_h

+

All the translators hooked together to perform a function are collectively called a graph. The left set of translators comprises the Client-stack. The right set of translators comprises the Server-stack.

+

The glusterfs translators can be sub-divided into many categories, but two important categories are Cluster and Performance translators:

+

One of the most important translators, and the first one that data/requests have to go through, is the fuse translator, which falls under the category of Mount Translators.

+
    +
  1. Cluster Translators:

     • DHT (Distributed Hash Table)
     • AFR (Automatic File Replication)

  2. Performance Translators (see the tuning example after this list):

     • io-cache
     • io-threads
     • md-cache
     • O-B (open behind)
     • QR (quick read)
     • r-a (read-ahead)
     • w-b (write-behind)
+
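As a minimal sketch (test-volume is the example volume used earlier; the option names shown are standard gluster volume options), individual performance translators can be queried, tuned, or disabled per volume through the CLI:

# query the current value of a performance translator option
gluster volume get test-volume performance.write-behind
# disable the write-behind translator for this volume
gluster volume set test-volume performance.write-behind off
# re-enable the io-cache translator
gluster volume set test-volume performance.io-cache on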

Other Feature Translators include:

+
    +
  • changelog
  • +
  • locks - GlusterFS has a locks translator which provides internal locking operations called inodelk and entrylk, which are used by AFR to achieve synchronization of operations on files or directories that conflict with each other.
  • +
  • marker
  • +
  • quota
  • +
+

Debug Translators

+
    +
  • trace - To trace the error logs generated during the communication amongst the translators.
  • +
  • io-stats
  • +
+

DHT(Distributed Hash Table) Translator

+

What is DHT?

+

DHT is the real core of how GlusterFS aggregates capacity and +performance across multiple servers. Its responsibility is to place each +file on exactly one of its subvolumes – unlike either replication (which +places copies on all of its subvolumes) or striping (which places pieces +onto all of its subvolumes). It’s a routing function, not splitting or +copying.

+

How DHT works?

+

The basic method used in DHT is consistent hashing. Each subvolume (brick) is assigned a range within a 32-bit hash space, covering the entire range with no holes or overlaps. Then each file is also assigned a value in that same space, by hashing its name. Exactly one brick will have an assigned range including the file's hash value, and so the file "should" be on that brick. However, there are many cases where that won't be the case, such as when the set of bricks (and therefore the assignment of hash ranges) has changed since the file was created, or when a brick is nearly full. Much of the complexity in DHT involves these special cases, which we'll discuss in a moment.
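As a hedged example (the brick path /exp1 matches the earlier examples and /exp1/mydir is a hypothetical directory; the hex layout value differs on every setup), the hash range that DHT assigned to a directory on a given brick can be read directly from the brick's extended attributes:

# run on a storage server, against the brick's local path, not the client mount
getfattr -n trusted.glusterfs.dht -e hex /exp1/mydir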

+

When you open() a file, the distribute translator is given one piece of information to find your file: the file name. To determine where that file is, the translator runs the file name through a hashing algorithm in order to turn that file name into a number.

+

A few Observations of DHT hash-values assignment:

+
    +
  1. The assignment of hash ranges to bricks is determined by extended attributes stored on directories, hence distribution is directory-specific.

  2. Consistent hashing is usually thought of as hashing around a circle, but in GlusterFS it's more linear. There's no need to "wrap around" at zero, because there's always a break (between one brick's range and another's) at zero.

  3. If a brick is missing, there will be a hole in the hash space. Even worse, if hash ranges are reassigned while a brick is offline, some of the new ranges might overlap with the (now out of date) range stored on that brick, creating a bit of confusion about where files should be.
+

AFR(Automatic File Replication) Translator

+

The Automatic File Replication (AFR) translator in GlusterFS makes use of extended attributes to keep track of the file operations. It is responsible for replicating the data across the bricks.
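For illustration (the brick path and file name below are assumptions), the changelog extended attributes that AFR maintains can be inspected on a brick; attributes of the form trusted.afr.<volname>-client-N record pending operations for each replica:

# run against the file's path on the brick, not on the client mount
getfattr -d -m . -e hex /exp1/dir/file1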

+
Responsibilities of AFR
+

Its responsibilities include the following:

+
    +
  1. Maintain replication consistency (i.e. the data on all the bricks of a replica set should be the same, even when there are operations happening on the same file/directory in parallel from multiple applications/mount points, as long as all the bricks in the replica set are up).

  2. Provide a way of recovering data in case of failures, as long as there is at least one brick which has the correct data.

  3. Serve fresh data for read/stat/readdir etc.
+

Geo-Replication

+

Geo-replication provides asynchronous replication of data across geographically distinct locations and was introduced in Glusterfs 3.2. It mainly works across WANs and is used to replicate the entire volume, unlike AFR, which is intra-cluster replication. This is mainly useful for backing up entire data sets for disaster recovery.

+

Geo-replication uses a primary-secondary model, whereby replication occurs between a Primary and a Secondary, both of which should be GlusterFS volumes. Geo-replication provides an incremental replication service over Local Area Networks (LANs), Wide Area Networks (WANs), and across the Internet.
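A minimal sketch of setting up such a session, assuming a primary volume test-volume and a secondary volume secondary-vol on the host secondary.example.com (all names are placeholders; prerequisites such as passwordless SSH are covered in the Geo-Replication admin guide):

gluster volume geo-replication test-volume secondary.example.com::secondary-vol create push-pem
gluster volume geo-replication test-volume secondary.example.com::secondary-vol start
gluster volume geo-replication test-volume secondary.example.com::secondary-vol status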

+

Geo-replication over LAN

+

You can configure Geo-replication to mirror data over a Local Area +Network.

+

geo-rep_lan

+

Geo-replication over WAN

+

You can configure Geo-replication to replicate data over a Wide Area +Network.

+

geo-rep_wan

+

Geo-replication over Internet

+

You can configure Geo-replication to mirror data over the Internet.

+

geo-rep03_internet

+

Multi-site cascading Geo-replication

+

You can configure Geo-replication to mirror data in a cascading fashion +across multiple sites.

+

geo-rep04_cascading

+

There are mainly two aspects while asynchronously replicating data:

+

1. Change detection - This includes the details of file operations that are necessary for syncing. There are two methods to sync the detected changes:

+

i. Changelogs - Changelog is a translator which records the necessary details for the fops that occur. The changes can be written in binary format or ASCII. There are three categories, with each category represented by a specific changelog format. All three types of categories are recorded in a single changelog file.

+

Entry - create(), mkdir(), mknod(), symlink(), link(), rename(), +unlink(), rmdir()

+

Data - write(), writev(), truncate(), ftruncate()

+

Meta - setattr(), fsetattr(), setxattr(), fsetxattr(), +removexattr(), fremovexattr()

+

In order to record the type of operation and the entity that underwent it, a type identifier is used. Normally, the entity on which the operation is performed would be identified by the pathname, but we choose to use the GlusterFS internal file identifier (GFID) instead (since GlusterFS supports a GFID-based backend, the pathname field may not always be valid, and for other reasons which are out of scope of this document). Therefore, the format of the record for the three types of operation can be summarized as follows:

+

Entry - GFID + FOP + MODE + UID + GID + PARGFID/BNAME [PARGFID/BNAME]

+

Meta - GFID of the file

+

Data - GFID of the file

+

GFID's are analogous to inodes. Data and Meta fops record the GFID of +the entity on which the operation was performed, thereby recording that +there was a data/metadata change on the inode. Entry fops record at the +minimum a set of six or seven records (depending on the type of +operation), that is sufficient to identify what type of operation the +entity underwent. Normally this record includes the GFID of the entity, +the type of file operation (which is an integer [an enumerated value +which is used in Glusterfs]) and the parent GFID and the basename +(analogous to parent inode and basename).

+

The changelog file is rolled over after a specific time interval. We then perform processing operations on the file, like converting it to an understandable/human readable format, keeping a private copy of the changelog, etc. The library then consumes these logs and serves application requests.
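As a rough sketch (test-volume and the brick path /exp1 are the examples used earlier; exact file naming can vary by version), the changelog translator can be switched on per volume, after which changelog files appear below each brick's .glusterfs directory:

gluster volume set test-volume changelog.changelog on
# changelog files are written under the brick's .glusterfs directory
ls /exp1/.glusterfs/changelogs/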

+

ii. Xsync - Marker translator maintains an extended attribute “xtime” +for each file and directory. Whenever any update happens it would update +the xtime attribute of that file and all its ancestors. So the change is +propagated from the node (where the change has occurred) all the way to +the root.

+

geo-replication-sync

+

Consider the above directory tree structure. At time T1 the primary and secondary were in sync with each other.

+

geo-replication-async

+

At time T2 a new file File2 was created. This will trigger the xtime marking (where xtime is the current timestamp) from File2 up to the root, i.e., the xtime of File2, Dir3, Dir1 and finally Dir0 will all be updated.

+

Geo-replication daemon crawls the file system based on the condition +that xtime(primary) > xtime(secondary). Hence in our example it would crawl +only the left part of the directory structure since the right part of +the directory structure still has equal timestamp. Although the crawling +algorithm is fast we still need to crawl a good part of the directory +structure.

+

2. Replication - We use rsync for data replication. Rsync is an external utility which calculates the diff of the two files and sends this difference from the source to the sink.

+

Overall working of GlusterFS

+

As soon as GlusterFS is installed on a server node, a gluster management daemon (glusterd) binary will be created. This daemon should be running on all participating nodes in the cluster. After starting glusterd, a trusted server pool (TSP) can be created consisting of all storage server nodes (a TSP can contain even a single node). Now bricks, which are the basic units of storage, can be created as export directories on these servers. Any number of bricks from this TSP can be clubbed together to form a volume.

+

Once a volume is created, a glusterfsd process starts running on each of the participating bricks. Along with this, configuration files known as vol files will be generated inside /var/lib/glusterd/vols/. There will be configuration files corresponding to each brick in the volume, containing all the details about that particular brick. The configuration file required by a client process will also be created. Now our filesystem is ready to use. We can mount this volume on a client machine very easily, as follows, and use it like we use local storage:
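For example (test-volume is the sample volume from earlier; exact file names vary between GlusterFS versions), the generated vol files can be listed on any server in the pool:

# per-brick server volfiles and the client (fuse) volfile live here
ls /var/lib/glusterd/vols/test-volume/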

+

mount.glusterfs <IP or hostname>:<volume_name> <mount_point>

+

IP or hostname can be that of any node in the trusted server pool in +which the required volume is created.

+

When we mount the volume in the client, the client glusterfs process +communicates with the servers’ glusterd process. Server glusterd process +sends a configuration file (vol file) containing the list of client +translators and another containing the information of each brick in the +volume with the help of which the client glusterfs process can now +directly communicate with each brick’s glusterfsd process. The setup is +now complete and the volume is now ready for client's service.

+

overallprocess

+

When a system call (file operation, or fop) is issued by the client in the mounted filesystem, the VFS (identifying the type of filesystem to be glusterfs) will send the request to the FUSE kernel module. The FUSE kernel module will in turn send it to GlusterFS in the userspace of the client node via /dev/fuse (this has been described in the FUSE section). The GlusterFS process on the client consists of a stack of translators called the client translators, which are defined in the configuration file (vol file) sent by the storage server's glusterd process. The first among these translators is the FUSE translator, which consists of the FUSE library (libfuse). Each translator has functions corresponding to each file operation, or fop, supported by glusterfs. The request will hit the corresponding function in each of the translators. The main client translators include:

+
    +
  • FUSE translator
  • +
  • DHT translator- DHT translator maps the request to the correct brick + that contains the file or directory required.
  • +
  • AFR translator- It receives the request from the previous translator + and if the volume type is replicate, it duplicates the request and + passes it on to the Protocol client translators of the replicas.
  • +
  • Protocol Client translator- Protocol Client translator is the last + in the client translator stack. This translator is divided into + multiple threads, one for each brick in the volume. This will + directly communicate with the glusterfsd of each brick.
  • +
+

In the storage server node that contains the brick in need, the request +again goes through a series of translators known as server translators, +main ones being:

+
    +
  • Protocol server translator
  • +
  • POSIX translator
  • +
+

The request will finally reach VFS and then will communicate with the +underlying native filesystem. The response will retrace the same path.

diff --git a/Quick-Start-Guide/Quickstart/index.html b/Quick-Start-Guide/Quickstart/index.html
new file mode 100644
index 00000000..466b6311
--- /dev/null
+++ b/Quick-Start-Guide/Quickstart/index.html
@@ -0,0 +1,4790 @@
Quick Start Guide - Gluster Docs

Quick Start Guide

+ +

Installing GlusterFS - a Quick Start Guide

+

Purpose of this document

+

This document is intended to provide a step-by-step guide to setting up GlusterFS for the first time with a minimum degree of complexity. For the purposes of this guide, it is required to use Fedora 30 (or higher; see https://fedoraproject.org/wiki/End_of_life) virtual machine instances.

+

After you deploy GlusterFS by following these steps, we recommend that you read the GlusterFS Admin Guide to learn how to select a volume type that fits your needs and how to administer GlusterFS. The GlusterFS Install Guide provides a more detailed explanation of the steps we show in this Quick Start Guide.

+

If you would like a more detailed walkthrough with instructions for +installing using different methods (in local virtual machines, EC2 and +baremetal) and different distributions, then have a look at the Install +guide.

+

Using Ansible to deploy and manage GlusterFS

+

If you are already an Ansible user, and are more comfortable with setting up distributed systems with Ansible, we recommend that you skip all of this and move over to the gluster-ansible repository, which gives most of the details needed to get the systems running quickly.

+

Automatically deploying GlusterFS with Puppet-Gluster+Vagrant

+

To deploy GlusterFS using scripted methods, please read this +article.

+

Step 1 – Have at least three nodes

+
    +
  • Fedora 30 (or later) on 3 nodes named "server1", "server2" and "server3"
  • +
  • A working network connection
  • +
  • At least two virtual disks, one for the OS installation, and one to be + used to serve GlusterFS storage (sdb), on each of these VMs. This will + emulate a real-world deployment, where you would want to separate + GlusterFS storage from the OS install.
  • +
  • Set up NTP on each of these servers so that the many applications running on top of the filesystem function properly. This is an important requirement.
  • +
+

Note: GlusterFS stores its dynamically generated configuration files +at /var/lib/glusterd. If at any point in time GlusterFS is unable to +write to these files (for example, when the backing filesystem is full), +it will at minimum cause erratic behavior for your system; or worse, +take your system offline completely. It is recommended to create separate +partitions for directories such as /var/log to reduce the chances of this happening.

+

Step 2 - Format and mount the bricks

+

Perform this step on all the nodes, "server{1,2,3}"

+

Note: We are going to use the XFS filesystem for the backend bricks. But Gluster is designed to work on top of any filesystem that supports extended attributes.

+

The following examples assume that the brick will be residing on /dev/sdb1.

+
mkfs.xfs -i size=512 /dev/sdb1
+mkdir -p /data/brick1
+echo '/dev/sdb1 /data/brick1 xfs defaults 1 2' >> /etc/fstab
+mount -a && mount
+
+

You should now see sdb1 mounted at /data/brick1

+

Step 3 - Installing GlusterFS

+

Install the software

+
yum install glusterfs-server
+
+

Start the GlusterFS management daemon:

+
service glusterd start
+
+

Check the status of the daemon:

+
service glusterd status
+
+

You should see something like this:

+
glusterd.service - LSB: glusterfs server
+       Loaded: loaded (/etc/rc.d/init.d/glusterd)
+   Active: active (running) since Mon, 13 Aug 2012 13:02:11 -0700; 2s ago
+   Process: 19254 ExecStart=/etc/rc.d/init.d/glusterd start (code=exited, status=0/SUCCESS)
+   CGroup: name=systemd:/system/glusterd.service
+       ├ 19260 /usr/sbin/glusterd -p /run/glusterd.pid
+       ├ 19304 /usr/sbin/glusterfsd --xlator-option georep-server.listen-port=24009 -s localhost...
+       └ 19309 /usr/sbin/glusterfs -f /var/lib/glusterd/nfs/nfs-server.vol -p /var/lib/glusterd/...
+
+
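On distributions using systemd (which includes recent Fedora releases), the equivalent commands are:

systemctl start glusterd
systemctl enable glusterd
systemctl status glusterd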

Step 4 - Configure the firewall

+

The gluster processes on the nodes need to be able to communicate with each other. +To simplify this setup, configure the firewall on each node to accept all traffic from the other node.

+
iptables -I INPUT -p all -s <ip-address> -j ACCEPT
+
+

where ip-address is the address of the other node.
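Alternatively, on systems running firewalld, something like the following may be used, assuming your distribution ships a glusterfs service definition for firewalld (otherwise open the required ports explicitly):

firewall-cmd --zone=public --add-service=glusterfs --permanent
firewall-cmd --reload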

+

Step 5 - Configure the trusted pool

+

From "server1"

+
gluster peer probe server2
+gluster peer probe server3
+
+

Note: When using hostnames, the first server (i.e. server1) needs to be probed from one other server to set its hostname. The reason is that when the other server (i.e. server2) is probed from server1, the hosts may be configured in such a way that only the IP address of server1 is transmitted on probing. So, in order to use hostnames throughout the cluster, it is advised to probe server1 back from server2, server3, or up to the nth server, based on the cluster size.

+

From "server2"

+
gluster peer probe server1
+
+

Note: Once this pool has been established, only trusted members may probe new servers into the pool. A new server cannot probe the pool; it must be probed from the pool.

+

Check the peer status on server1

+
gluster peer status
+
+

You should see something like this (the UUID will differ)

+
Number of Peers: 2
+
+Hostname: server2
+Uuid: f0e7b138-4874-4bc0-ab91-54f20c7068b4
+State: Peer in Cluster (Connected)
+
+Hostname: server3
+Uuid: f0e7b138-4532-4bc0-ab91-54f20c701241
+State: Peer in Cluster (Connected)
+
+

Step 6 - Set up a GlusterFS volume

+

On all servers:

+
mkdir -p /data/brick1/gv0
+
+

From any single server:

+
gluster volume create gv0 replica 3 server1:/data/brick1/gv0 server2:/data/brick1/gv0 server3:/data/brick1/gv0
+
+

On successful operation, you should see something like:

+
volume create: gv0: success: please start the volume to access data
+
+

Then start the newly created volume:

+
gluster volume start gv0
+
+

You should see something like:

+
volume start: gv0: success
+
+

Confirm that the volume shows "Started":

+
gluster volume info
+
+

You should see something like this (the Volume ID will differ):

+
Volume Name: gv0
+Type: Replicate
+Volume ID: f25cc3d8-631f-41bd-96e1-3e22a4c6f71f
+Status: Started
+Snapshot Count: 0
+Number of Bricks: 1 x 3 = 3
+Transport-type: tcp
+Bricks:
+Brick1: server1:/data/brick1/gv0
+Brick2: server2:/data/brick1/gv0
+Brick3: server3:/data/brick1/gv0
+Options Reconfigured:
+transport.address-family: inet
+
+

Note: If the volume does not show "Started", the files under /var/log/glusterfs/glusterd.log should be checked in order to debug and diagnose the situation. These logs can be examined on one or all of the configured servers.

+

Step 7 - Testing the GlusterFS volume

+

For this step, we will use one of the servers to mount the volume. Typically, you would do this from an external machine, known as a "client". Since using this method would require additional packages to be installed on the client machine, we will use one of the servers as a simple place to test first, as if it were that "client".

+
mkdir /mnt/gluster-test
+mount -t glusterfs server1:/gv0 /mnt/gluster-test
+for i in `seq -w 1 100`; do cp -rp /var/log/messages /mnt/gluster-test/copy-test-$i; done
+
+

First, check the client mount point:

+
ls -lA /mnt/gluster-test/copy* | wc -l
+
+

You should see 100 files returned. Next, check the GlusterFS brick mount +points on each server:

+
ls -lA /data/brick1/gv0/copy*
+
+

You should see 100 files on each server using the method we listed here. +Without replication, in a distribute only volume (not detailed here), you +should see about 33 files on each one.
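As an optional sanity check on the replica 3 volume created above, you can also confirm that the bricks are online and that no heals are pending:

gluster volume status gv0
gluster volume heal gv0 info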

diff --git a/Troubleshooting/gfid-to-path/index.html b/Troubleshooting/gfid-to-path/index.html
new file mode 100644
index 00000000..f0c79ad5
--- /dev/null
+++ b/Troubleshooting/gfid-to-path/index.html
@@ -0,0 +1,4611 @@
gfid to path - Gluster Docs

Convert GFID to Path

+

The GlusterFS internal file identifier (GFID) is a UUID that is unique to each file across the entire cluster. It is analogous to an inode number in a normal filesystem. The GFID of a file is stored in its xattr named trusted.gfid.
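For illustration (the brick path below is an assumption), the raw GFID of a file can also be read directly from its trusted.gfid xattr on any brick:

# run on the storage server, against the file's path on the brick
getfattr -n trusted.gfid -e hex /data/brick1/gv0/dir/file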

+

Special mount using gfid-access translator:

+
mount -t glusterfs -o aux-gfid-mount vm1:test /mnt/testvol
+
+

Assume you have the GFID of a file from the changelog (or somewhere else). To try this out, you can get the GFID of a file from the mountpoint:

+
getfattr -n glusterfs.gfid.string /mnt/testvol/dir/file
+
+
+

Get file path from GFID (Method 1):

+

(Lists hardlinks delimited by :, returns path as seen from mountpoint)

+

Turn on build-pgfid option

+
gluster volume set test build-pgfid on
+
+

Read virtual xattr glusterfs.ancestry.path which contains the file path

+
getfattr -n glusterfs.ancestry.path -e text /mnt/testvol/.gfid/<GFID>
+
+

Example:

+
[root@vm1 glusterfs]# ls -il /mnt/testvol/dir/
+total 1
+10610563327990022372 -rw-r--r--. 2 root root 3 Jul 17 18:05 file
+10610563327990022372 -rw-r--r--. 2 root root 3 Jul 17 18:05 file3
+
+[root@vm1 glusterfs]# getfattr -n glusterfs.gfid.string /mnt/testvol/dir/file
+getfattr: Removing leading '/' from absolute path names
+# file: mnt/testvol/dir/file
+glusterfs.gfid.string="11118443-1894-4273-9340-4b212fa1c0e4"
+
+[root@vm1 glusterfs]# getfattr -n glusterfs.ancestry.path -e text /mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4
+getfattr: Removing leading '/' from absolute path names
+# file: mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4
+glusterfs.ancestry.path="/dir/file:/dir/file3"
+
+

Get file path from GFID (Method 2):

+

(Does not list all hardlinks, returns backend brick path)

+
getfattr -n trusted.glusterfs.pathinfo -e text /mnt/testvol/.gfid/<GFID>
+
+

Example:

+
[root@vm1 glusterfs]# getfattr -n trusted.glusterfs.pathinfo -e text /mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4
+getfattr: Removing leading '/' from absolute path names
+# file: mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4
+trusted.glusterfs.pathinfo="(<DISTRIBUTE:test-dht> <POSIX(/mnt/brick-test/b):vm1:/mnt/brick-test/b/dir//file3>)"
+
+ +

posix: placeholders for GFID to path conversion

diff --git a/Troubleshooting/gluster-crash/index.html b/Troubleshooting/gluster-crash/index.html
new file mode 100644
index 00000000..bf802d03
--- /dev/null
+++ b/Troubleshooting/gluster-crash/index.html
@@ -0,0 +1,4446 @@
Crashes - Gluster Docs

Debugging a Crash

+

To find out why a Gluster process terminated abruptly, we need the following:

+
    +
  • A coredump of the process that crashed
  • +
  • The exact version of Gluster that is running
  • +
  • The Gluster log files
  • +
  • the output of gluster volume info
  • +
  • Steps to reproduce the crash if available
  • +
+

Contact the community with this information, or open an issue.
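A minimal sketch of gathering this information on a systemd-based node (the paths and the use of coredumpctl are assumptions that depend on your distribution's core-dump handling):

gluster --version
gluster volume info > volume-info.txt
# list recent core dumps captured by systemd-coredump
coredumpctl list
# collect the log files
tar -czf gluster-logs.tar.gz /var/log/glusterfs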

diff --git a/Troubleshooting/index.html b/Troubleshooting/index.html
new file mode 100644
index 00000000..a7f03414
--- /dev/null
+++ b/Troubleshooting/index.html
@@ -0,0 +1,4537 @@
Index - Gluster Docs

Index

+ +

Troubleshooting Guide

+

This guide describes some commonly seen issues and the steps to recover from them. If that doesn't help, reach out to the Gluster community; in that case, the guide also describes what information needs to be provided in order to debug the issue. At a minimum, we need the version of Gluster that is running and the output of gluster volume info.

+

Where Do I Start?

+

Is the issue already listed in the component specific troubleshooting sections?

+ +

If that didn't help, here is how to debug further.

+

Identifying the problem and getting the necessary information to diagnose it is the first step in troubleshooting your Gluster setup. As Gluster operations involve interactions between multiple processes, this can involve multiple steps.

+

What Happened?

diff --git a/Troubleshooting/resolving-splitbrain/index.html b/Troubleshooting/resolving-splitbrain/index.html
new file mode 100644
index 00000000..0756f6cf
--- /dev/null
+++ b/Troubleshooting/resolving-splitbrain/index.html
@@ -0,0 +1,5443 @@
Troubleshooting Split-Brains - Gluster Docs

Heal info and split-brain resolution

+

This document explains the heal info command available in gluster for monitoring pending heals in replicate volumes and the methods available to resolve split-brains.

+

Types of Split-Brains:

+

A file is said to be in split-brain when Gluster AFR cannot determine which copy in the replica +is the correct one.

+

There are three types of split-brains:

+
    +
  • Data split-brain: The data in the file differs on the bricks in the replica set
  • +
  • Metadata split-brain: The metadata differs on the bricks
  • +
  • Entry split-brain: The GFID of the file is different on the bricks in the replica or the type of the file is different on the bricks in the replica. Type-mismatch cannot be healed using any of the split-brain resolution methods while gfid split-brains can be.
  • +
+

1) Volume heal info:

+

Usage: gluster volume heal <VOLNAME> info

+

This lists all the files that require healing (and will be processed by the self-heal daemon). It prints either their path or their GFID.

+

Interpreting the output

+

All the files listed in the output of this command need to be healed. +The files listed may also be accompanied by the following tags:

+

a) 'Is in split-brain'
+A file in data or metadata split-brain will +be listed with " - Is in split-brain" appended after its path/GFID. E.g. +"/file4" in the output provided below. However, for a file in GFID split-brain, +the parent directory of the file is shown to be in split-brain and the file +itself is shown to be needing healing, e.g. "/dir" in the output provided below +is in split-brain because of GFID split-brain of file "/dir/a". +Files in split-brain cannot be healed without resolving the split-brain.

+

b) 'Is possibly undergoing heal'
+When the heal info command is run, it (or to be more specific, the 'glfsheal' binary that is executed when you run the command) takes locks on each file to find if it needs healing. However, if the self-heal daemon had already started healing the file, it would have taken locks which glfsheal wouldn't be able to acquire. In such a case, it could print this message. Another possible case could be multiple glfsheal processes running simultaneously (e.g. multiple users ran a heal info command at the same time) and competing for same lock.

+

The following is an example of heal info command's output.

+

Example

+

Consider a replica volume "test" with two bricks b1 and b2; +self-heal daemon off, mounted at /mnt.

+
# gluster volume heal test info
+Brick \<hostname:brickpath-b1>
+<gfid:aaca219f-0e25-4576-8689-3bfd93ca70c2> - Is in split-brain
+<gfid:39f301ae-4038-48c2-a889-7dac143e82dd> - Is in split-brain
+<gfid:c3c94de2-232d-4083-b534-5da17fc476ac> - Is in split-brain
+<gfid:6dc78b20-7eb6-49a3-8edb-087b90142246>
+
+Number of entries: 4
+
+Brick <hostname:brickpath-b2>
+/dir/file2
+/dir/file1 - Is in split-brain
+/dir - Is in split-brain
+/dir/file3
+/file4 - Is in split-brain
+/dir/a
+
+
+Number of entries: 6
+
+

Analysis of the output

+

It can be seen that

+

A) from brick b1, four entries need healing:

+
    +
  • file with gfid:6dc78b20-7eb6-49a3-8edb-087b90142246 needs healing
  • +
  • "aaca219f-0e25-4576-8689-3bfd93ca70c2", "39f301ae-4038-48c2-a889-7dac143e82dd" and "c3c94de2-232d-4083-b534-5da17fc476ac" are in split-brain
  • +
+

B) from brick b2 six entries need healing-

+
    +
  • "a", "file2" and "file3" need healing
  • +
  • "file1", "file4" & "/dir" are in split-brain
  • +
+

2. Volume heal info split-brain

+

Usage: gluster volume heal <VOLNAME> info split-brain
+This command only shows the list of files that are in split-brain. The output is therefore a subset of gluster volume heal <VOLNAME> info

+

Example

+
# gluster volume heal test info split-brain
+Brick <hostname:brickpath-b1>
+<gfid:aaca219f-0e25-4576-8689-3bfd93ca70c2>
+<gfid:39f301ae-4038-48c2-a889-7dac143e82dd>
+<gfid:c3c94de2-232d-4083-b534-5da17fc476ac>
+Number of entries in split-brain: 3
+
+Brick <hostname:brickpath-b2>
+/dir/file1
+/dir
+/file4
+Number of entries in split-brain: 3
+
+

Note that similar to the heal info command, for GFID split-brains (same filename but different GFID) +their parent directories are listed to be in split-brain.

+

3. Resolution of split-brain using gluster CLI

+

Once the files in split-brain are identified, their resolution can be done from the gluster command line using various policies. Type-mismatch cannot be healed using these methods. Split-brain resolution commands let the user resolve data, metadata, and GFID split-brains.

+

3.1 Resolution of data/metadata split-brain using gluster CLI

+

Data and metadata split-brains can be resolved using the following policies:

+

i) Select the bigger-file as source

+

This command is useful for per file healing where it is known/decided that the +file with bigger size is to be considered as source.
+gluster volume heal <VOLNAME> split-brain bigger-file <FILE>
+Here, <FILE> can be either the full file name as seen from the root of the volume +(or) the GFID-string representation of the file, which sometimes gets displayed +in the heal info command's output. Once this command is executed, the replica containing the <FILE> with a bigger +size is found and healing is completed with that brick as a source.

+

Example :

+

Consider the earlier output of the heal info split-brain command.

+

Before healing the file, notice file size and md5 checksums :

+

On brick b1:

+
[brick1]# stat b1/dir/file1
+  File: ‘b1/dir/file1’
+  Size: 17              Blocks: 16         IO Block: 4096   regular file
+Device: fd03h/64771d    Inode: 919362      Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Access: 2015-03-06 13:55:40.149897333 +0530
+Modify: 2015-03-06 13:55:37.206880347 +0530
+Change: 2015-03-06 13:55:37.206880347 +0530
+ Birth: -
+[brick1]#
+[brick1]# md5sum b1/dir/file1
+040751929ceabf77c3c0b3b662f341a8  b1/dir/file1
+
+

On brick b2:

+
[brick2]# stat b2/dir/file1
+  File: ‘b2/dir/file1’
+  Size: 13              Blocks: 16         IO Block: 4096   regular file
+Device: fd03h/64771d    Inode: 919365      Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Access: 2015-03-06 13:54:22.974451898 +0530
+Modify: 2015-03-06 13:52:22.910758923 +0530
+Change: 2015-03-06 13:52:22.910758923 +0530
+ Birth: -
+[brick2]#
+[brick2]# md5sum b2/dir/file1
+cb11635a45d45668a403145059c2a0d5  b2/dir/file1
+
+

Healing file1 using the above command:
gluster volume heal test split-brain bigger-file /dir/file1
Healed /dir/file1.
+gluster volume heal test split-brain bigger-file /dir/file1
+Healed /dir/file1.

+

After healing is complete, the md5sum and file size on both bricks should be the same.

+

On brick b1:

+
[brick1]# stat b1/dir/file1
+  File: ‘b1/dir/file1’
+  Size: 17              Blocks: 16         IO Block: 4096   regular file
+Device: fd03h/64771d    Inode: 919362      Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Access: 2015-03-06 14:17:27.752429505 +0530
+Modify: 2015-03-06 13:55:37.206880347 +0530
+Change: 2015-03-06 14:17:12.880343950 +0530
+ Birth: -
+[brick1]#
+[brick1]# md5sum b1/dir/file1
+040751929ceabf77c3c0b3b662f341a8  b1/dir/file1
+
+

On brick b2:

+
[brick2]# stat b2/dir/file1
+  File: ‘b2/dir/file1’
+  Size: 17              Blocks: 16         IO Block: 4096   regular file
+Device: fd03h/64771d    Inode: 919365      Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Access: 2015-03-06 14:17:23.249403600 +0530
+Modify: 2015-03-06 13:55:37.206880000 +0530
+Change: 2015-03-06 14:17:12.881343955 +0530
+ Birth: -
+[brick2]#
+[brick2]# md5sum b2/dir/file1
+040751929ceabf77c3c0b3b662f341a8  b2/dir/file1
+
+

ii) Select the file with the latest mtime as source

+
gluster volume heal <VOLNAME> split-brain latest-mtime <FILE>
+
+

As is perhaps self-explanatory, this command uses the brick having the latest modification time for <FILE> as the source for healing.

+

iii) Select one of the bricks in the replica as the source for a particular file

+
gluster volume heal <VOLNAME> split-brain source-brick <HOSTNAME:BRICKNAME> <FILE>
+
+

Here, <HOSTNAME:BRICKNAME> is selected as source brick and <FILE> present in the source brick is taken as the source for healing.

+

Example :

+

Notice the md5 checksums and file size before and after healing.

+

Before heal :

+

On brick b1:

+
[brick1]# stat b1/file4
+  File: ‘b1/file4’
+  Size: 4               Blocks: 16         IO Block: 4096   regular file
+Device: fd03h/64771d    Inode: 919356      Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Access: 2015-03-06 13:53:19.417085062 +0530
+Modify: 2015-03-06 13:53:19.426085114 +0530
+Change: 2015-03-06 13:53:19.426085114 +0530
+ Birth: -
+[brick1]#
+[brick1]# md5sum b1/file4
+b6273b589df2dfdbd8fe35b1011e3183  b1/file4
+
+

On brick b2:

+
[brick2]# stat b2/file4
+  File: ‘b2/file4’
+  Size: 4               Blocks: 16         IO Block: 4096   regular file
+Device: fd03h/64771d    Inode: 919358      Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Access: 2015-03-06 13:52:35.761833096 +0530
+Modify: 2015-03-06 13:52:35.769833142 +0530
+Change: 2015-03-06 13:52:35.769833142 +0530
+ Birth: -
+[brick2]#
+[brick2]# md5sum b2/file4
+0bee89b07a248e27c83fc3d5951213c1  b2/file4
+
+

Healing the file with gfid c3c94de2-232d-4083-b534-5da17fc476ac using the above command :

+
gluster volume heal test split-brain source-brick test-host:/test/b1 gfid:c3c94de2-232d-4083-b534-5da17fc476ac
+
+

Healed gfid:c3c94de2-232d-4083-b534-5da17fc476ac.

+

After healing :

+

On brick b1:

+
# stat b1/file4
+  File: ‘b1/file4’
+  Size: 4               Blocks: 16         IO Block: 4096   regular file
+Device: fd03h/64771d    Inode: 919356      Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Access: 2015-03-06 14:23:38.944609863 +0530
+Modify: 2015-03-06 13:53:19.426085114 +0530
+Change: 2015-03-06 14:27:15.058927962 +0530
+ Birth: -
+# md5sum b1/file4
+b6273b589df2dfdbd8fe35b1011e3183  b1/file4
+
+

On brick b2:

+
# stat b2/file4
+ File: ‘b2/file4’
+  Size: 4               Blocks: 16         IO Block: 4096   regular file
+Device: fd03h/64771d    Inode: 919358      Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Access: 2015-03-06 14:23:38.944609000 +0530
+Modify: 2015-03-06 13:53:19.426085000 +0530
+Change: 2015-03-06 14:27:15.059927968 +0530
+ Birth: -
+# md5sum b2/file4
+b6273b589df2dfdbd8fe35b1011e3183  b2/file4
+
+

iv) Select one brick of the replica as the source for all files

+
gluster volume heal <VOLNAME> split-brain source-brick <HOSTNAME:BRICKNAME>
+
+

Consider a scenario where many files are in split-brain such that one brick of the replica pair is the source. As a result of the above command, all split-brained files in <HOSTNAME:BRICKNAME> are selected as the source and healed to the sink.

+

Example:

+

Consider a volume having three entries "a, b and c" in split-brain.

+
# gluster volume heal test split-brain source-brick test-host:/test/b1
+Healed gfid:944b4764-c253-4f02-b35f-0d0ae2f86c0f.
+Healed gfid:3256d814-961c-4e6e-8df2-3a3143269ced.
+Healed gfid:b23dd8de-af03-4006-a803-96d8bc0df004.
+Number of healed entries: 3
+
+

3.2 Resolution of GFID split-brain using gluster CLI

+

GFID split-brains can also be resolved by the gluster command line using the same policies that are used to resolve data and metadata split-brains.

+

i) Selecting the bigger-file as source

+

This method is useful for per-file healing and where you can decide that the file with the bigger size is to be considered as the source.

+

Run the following command to obtain the path of the file that is in split-brain:

+
# gluster volume heal VOLNAME info split-brain
+
+

From the output, identify the files for which file operations performed from the client failed with input/output error.

+

Example :

+
# gluster volume heal testvol info
+Brick 10.70.47.45:/bricks/brick2/b0
+/f5
+/ - Is in split-brain
+
+Status: Connected
+Number of entries: 2
+
+Brick 10.70.47.144:/bricks/brick2/b1
+/f5
+/ - Is in split-brain
+
+Status: Connected
+Number of entries: 2
+
+
+

Note
Entries which are in GFID split-brain may not always be shown as being in split-brain by the heal info or heal info split-brain commands. For entry split-brains, it is the parent directory which is shown as being in split-brain. So one might need to run info split-brain to get the directory names, and then heal info to get the list of files under that directory which might be in split-brain (they could just be needing heal, without being in split-brain).

+
+

In the above command, testvol is the volume name, b0 and b1 are the bricks. +Execute the below getfattr command on the brick to fetch information if a file is in GFID split-brain or not.

+
# getfattr -d -e hex -m. <path-to-file>
+
+

Example :

+

On brick /b0

+
# getfattr -d -m . -e hex /bricks/brick2/b0/f5
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b0/f5
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.afr.testvol-client-1=0x000000020000000100000000
+trusted.afr.dirty=0x000000000000000000000000
+trusted.gfid=0xce0a9956928e40afb78e95f78defd64f
+trusted.gfid2path.9cde09916eabc845=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6635
+
+

On brick /b1

+
# getfattr -d -m . -e hex /bricks/brick2/b1/f5
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b1/f5
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.afr.testvol-client-0=0x000000020000000100000000
+trusted.afr.dirty=0x000000000000000000000000
+trusted.gfid=0x9563544118653550e888ab38c232e0c
+trusted.gfid2path.9cde09916eabc845=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6635
+
+

You can notice the difference in GFID for the file f5 in both the bricks. +You can find the differences in the file size by executing stat command on the file from the bricks.

+

On brick /b0

+
# stat /bricks/brick2/b0/f5
+File: ‘/bricks/brick2/b0/f5’
+Size: 15            Blocks: 8          IO Block: 4096   regular file
+Device: fd15h/64789d    Inode: 67113350    Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Context: system_u:object_r:glusterd_brick_t:s0
+Access: 2018-08-29 20:46:26.353751073 +0530
+Modify: 2018-08-29 20:46:26.361751203 +0530
+Change: 2018-08-29 20:47:16.363751236 +0530
+Birth: -
+
+

On brick /b1

+
# stat /bricks/brick2/b1/f5
+File: ‘/bricks/brick2/b1/f5’
+Size: 2             Blocks: 8          IO Block: 4096   regular file
+Device: fd15h/64789d    Inode: 67111750    Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Context: system_u:object_r:glusterd_brick_t:s0
+Access: 2018-08-29 20:44:56.153301616 +0530
+Modify: 2018-08-29 20:44:56.161301745 +0530
+Change: 2018-08-29 20:44:56.162301761 +0530
+Birth: -
+
+

Execute the following command along with the full filename as seen from the root of the volume which is displayed in the heal info command's output:

+
# gluster volume heal VOLNAME split-brain bigger-file FILE
+
+

Example :

+
# gluster volume heal testvol split-brain bigger-file /f5
+GFID split-brain resolved for file /f5
+
+

After the healing is complete, the GFID of the file on both the bricks must be the same as that of the file which had the bigger size. The following is a sample output of the getfattr command after completion of healing the file.

+

On brick /b0

+
# getfattr -d -m . -e hex /bricks/brick2/b0/f5
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b0/f5
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.gfid=0xce0a9956928e40afb78e95f78defd64f
+trusted.gfid2path.9cde09916eabc845=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6635
+
+

On brick /b1

+
# getfattr -d -m . -e hex /bricks/brick2/b1/f5
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b1/f5
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.gfid=0xce0a9956928e40afb78e95f78defd64f
+trusted.gfid2path.9cde09916eabc845=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6635
+
+

ii) Selecting the file with latest mtime as source

+

This method is useful for per-file healing and if you want the file with the latest mtime to be considered as the source.

+

Example :

+

Let's take another file which is in GFID split-brain and try to heal it using the latest-mtime option.

+

On brick /b0

+
# getfattr -d -m . -e hex /bricks/brick2/b0/f4
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b0/f4
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.afr.testvol-client-1=0x000000020000000100000000
+trusted.afr.dirty=0x000000000000000000000000
+trusted.gfid=0xb66b66d07b315f3c9cffac2fb6422a28
+trusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634
+
+

On brick /b1

+
# getfattr -d -m . -e hex /bricks/brick2/b1/f4
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b1/f4
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.afr.testvol-client-0=0x000000020000000100000000
+trusted.afr.dirty=0x000000000000000000000000
+trusted.gfid=0x87242f808c6e56a007ef7d49d197acff
+trusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634
+
+

You can notice the difference in GFID for the file f4 in both the bricks. +You can find the difference in the modification time by executing stat command on the file from the bricks.

+

On brick /b0

+
# stat /bricks/brick2/b0/f4
+File: ‘/bricks/brick2/b0/f4’
+Size: 14            Blocks: 8          IO Block: 4096   regular file
+Device: fd15h/64789d    Inode: 67113349    Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Context: system_u:object_r:glusterd_brick_t:s0
+Access: 2018-08-29 20:57:38.913629991 +0530
+Modify: 2018-08-29 20:57:38.921630122 +0530
+Change: 2018-08-29 20:57:38.923630154 +0530
+Birth: -
+
+

On brick /b1

+
# stat /bricks/brick2/b1/f4
+File: ‘/bricks/brick2/b1/f4’
+Size: 2             Blocks: 8          IO Block: 4096   regular file
+Device: fd15h/64789d    Inode: 67111749    Links: 2
+Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
+Context: system_u:object_r:glusterd_brick_t:s0
+Access: 2018-08-24 20:54:50.953217256 +0530
+Modify: 2018-08-24 20:54:50.961217385 +0530
+Change: 2018-08-24 20:54:50.962217402 +0530
+Birth: -
+
+

Execute the following command:

+
# gluster volume heal VOLNAME split-brain latest-mtime FILE
+
+

Example :

+
# gluster volume heal testvol split-brain latest-mtime /f4
+GFID split-brain resolved for file /f4
+
+

After the healing is complete, the GFID of the files on both bricks must be same. The following is a sample output of the getfattr command after completion of healing the file. You can notice that the file has been healed using the brick having the latest mtime as the source.

+

On brick /b0

+
# getfattr -d -m . -e hex /bricks/brick2/b0/f4
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b0/f4
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.gfid=0xb66b66d07b315f3c9cffac2fb6422a28
+trusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634
+
+

On brick /b1

+
# getfattr -d -m . -e hex /bricks/brick2/b1/f4
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b1/f4
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.gfid=0xb66b66d07b315f3c9cffac2fb6422a28
+trusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634
+
+

iii) Select one of the bricks in the replica as source for a particular file

+

This method is useful for per-file healing and if you know which copy of the file is good.

+

Example :

+

Let's take another file which is in GFID split-brain and try to heal it using the source-brick option.

+

On brick /b0

+
# getfattr -d -m . -e hex /bricks/brick2/b0/f3
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b0/f3
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.afr.testvol-client-1=0x000000020000000100000000
+trusted.afr.dirty=0x000000000000000000000000
+trusted.gfid=0x9d542fb1b3b15837a2f7f9dcdf5d6ee8
+trusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634
+
+

On brick /b1

+
# getfattr -d -m . -e hex /bricks/brick2/b1/f3
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b0/f3
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.afr.testvol-client-1=0x000000020000000100000000
+trusted.afr.dirty=0x000000000000000000000000
+trusted.gfid=0xc90d9b0f65f6530b95b9f3f8334033df
+trusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634
+
+

You can notice the difference in GFID for the file f3 in both the bricks.

+

Execute the following command:

+
# gluster volume heal VOLNAME split-brain source-brick HOSTNAME:export-directory-absolute-path FILE
+
+

In this command, FILE present in HOSTNAME : export-directory-absolute-path is taken as source for healing.

+

Example :

+
# gluster volume heal testvol split-brain source-brick 10.70.47.144:/bricks/brick2/b1 /f3
+GFID split-brain resolved for file /f3
+
+

After the healing is complete, the GFID of the file on both the bricks should be same as that of the brick which was chosen as source for healing. The following is a sample output of the getfattr command after the file is healed.

+

On brick /b0

+
# getfattr -d -m . -e hex /bricks/brick2/b0/f3
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b0/f3
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.gfid=0x90d9b0f65f6530b95b9f3f8334033df
+trusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634
+
+

On brick /b1

+
# getfattr -d -m . -e hex /bricks/brick2/b1/f3
+getfattr: Removing leading '/' from absolute path names
+file: bricks/brick2/b1/f3
+security.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000
+trusted.gfid=0x90d9b0f65f6530b95b9f3f8334033df
+trusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634
+
+
+

Note

  • One cannot use the GFID of the file as an argument with any of the CLI options to resolve GFID split-brain. It should be the absolute path, as seen from the mount point, of the file considered as source.

  • With the source-brick option there is no way to resolve all GFID split-brains in one shot by omitting the file path in the CLI, as is done while resolving data or metadata split-brain. For each file in GFID split-brain, run the CLI with the policy you want to use.

  • Resolving directory GFID split-brain using the CLI with the source-brick option in a distributed-replicated volume needs to be done explicitly on every sub-volume that is in this state. Since directories get created on all the sub-volumes, using one particular brick as source for directory GFID split-brain heals the directory only for that particular sub-volume. The source brick should be chosen such that, after the heal, all the bricks of all the sub-volumes have the same GFID. An illustrative command sequence follows this list.
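As an illustration (volume, host and brick names here are hypothetical), for a 2x3 distributed-replicated volume in which the directory /dir is in GFID split-brain on both replica sets, the command has to be run once per replica set, choosing source bricks that carry the same GFID for /dir:

# gluster volume heal testvol split-brain source-brick host1:/bricks/brick1/b0 /dir
# gluster volume heal testvol split-brain source-brick host4:/bricks/brick2/b3 /dir

After both commands complete, verify with getfattr -d -m . -e hex on each brick that trusted.gfid of /dir is identical everywhere.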

Note:

+

As mentioned earlier, type-mismatch cannot be resolved using the CLI. Type-mismatch means different st_mode values (for example, the entry is a file on one brick while it is a directory on the other). Trying to heal such an entry will fail.

+

Example

+

The entry named "entry1" is of different types on the bricks of the replica. Let's try to heal it using the split-brain CLI.

+
# gluster volume heal test split-brain source-brick test-host:/test/b1 /entry1
+Healing /entry1 failed:Operation not permitted.
+Volume heal failed.
+
+

However, such entries can be fixed by deleting the file from all but one of the bricks. See Fixing Directory entry split-brain below.

+

An overview of the working of the heal info commands

+

When these commands are invoked, a "glfsheal" process is spawned which reads the entries from the various sub-directories under /<brick-path>/.glusterfs/indices/ of all the bricks that are up (that it can connect to), one after another. These entries are the GFIDs of files that might need healing. Once the GFID entries from a brick are obtained, a lookup is done on each such file on every participating brick of the replica pair and the trusted.afr.* extended attributes are examined, and from that it is determined whether the file needs healing, is in split-brain, etc., depending on the requirement of each command; the result is then displayed to the user.

+

4. Resolution of split-brain from the mount point

+

A set of getfattr and setfattr commands have been provided to detect the data and metadata split-brain status of a file and to resolve the split-brain, if any, from the mount point.

+

Consider a volume "test", having bricks b0, b1, b2 and b3.

+
# gluster volume info test
+
+Volume Name: test
+Type: Distributed-Replicate
+Volume ID: 00161935-de9e-4b80-a643-b36693183b61
+Status: Started
+Number of Bricks: 2 x 2 = 4
+Transport-type: tcp
+Bricks:
+Brick1: test-host:/test/b0
+Brick2: test-host:/test/b1
+Brick3: test-host:/test/b2
+Brick4: test-host:/test/b3
+
+

Directory structure of the bricks is as follows:

+
# tree -R /test/b?
+/test/b0
+├── dir
+│   └── a
+└── file100
+
+/test/b1
+├── dir
+│   └── a
+└── file100
+
+/test/b2
+├── dir
+├── file1
+├── file2
+└── file99
+
+/test/b3
+├── dir
+├── file1
+├── file2
+└── file99
+
+

Some files in the volume are in split-brain.

+
# gluster v heal test info split-brain
+Brick test-host:/test/b0/
+/file100
+/dir
+Number of entries in split-brain: 2
+
+Brick test-host:/test/b1/
+/file100
+/dir
+Number of entries in split-brain: 2
+
+Brick test-host:/test/b2/
+/file99
+<gfid:5399a8d1-aee9-4653-bb7f-606df02b3696>
+Number of entries in split-brain: 2
+
+Brick test-host:/test/b3/
+<gfid:05c4b283-af58-48ed-999e-4d706c7b97d5>
+<gfid:5399a8d1-aee9-4653-bb7f-606df02b3696>
+Number of entries in split-brain: 2
+
+

To know data/metadata split-brain status of a file:

+
getfattr -n replica.split-brain-status <path-to-file>
+
+

The above command, executed from the mount, tells whether a file is in data or metadata split-brain. It also provides the list of afr children to analyze to get more information about the file. This command is not applicable to GFID/directory split-brain.

+

Example:

+
  1. "file100" is in metadata split-brain. Executing the above mentioned command for file100 gives:
+
# getfattr -n replica.split-brain-status file100
+file: file100
+replica.split-brain-status="data-split-brain:no    metadata-split-brain:yes    Choices:test-client-0,test-client-1"
+
+
  2. "file1" is in data split-brain.
+
# getfattr -n replica.split-brain-status file1
+file: file1
+replica.split-brain-status="data-split-brain:yes    metadata-split-brain:no    Choices:test-client-2,test-client-3"
+
+
  3. "file99" is in both data and metadata split-brain.
+
# getfattr -n replica.split-brain-status file99
+file: file99
+replica.split-brain-status="data-split-brain:yes    metadata-split-brain:yes    Choices:test-client-2,test-client-3"
+
+
  4. "dir" is in directory split-brain but, as mentioned earlier, the above command is not applicable to such split-brain. So it says that the file is not under data or metadata split-brain.
+
# getfattr -n replica.split-brain-status dir
+file: dir
+replica.split-brain-status="The file is not under data or metadata split-brain"
+
+
  5. "file2" is not in any kind of split-brain.
+
# getfattr -n replica.split-brain-status file2
+file: file2
+replica.split-brain-status="The file is not under data or metadata split-brain"
+
+

To analyze the files in data and metadata split-brain

+

Trying to do operations (say cat, getfattr, etc.) from the mount on files in split-brain gives an input/output error. To enable users to analyze such files, a setfattr command is provided.

+
# setfattr -n replica.split-brain-choice -v "choiceX" <path-to-file>
+
+

Using this command, a particular brick can be chosen as the replica from which the file in split-brain is accessed.

+

Example:

+
"file1" is in data split-brain; trying to read from the file gives an input/output error.
+
# cat file1
+cat: file1: Input/output error
+
+

Split-brain choices provided for file1 were test-client-2 and test-client-3.

+

Setting test-client-2 as split-brain choice for file1 serves reads from b2 for the file.

+
# setfattr -n replica.split-brain-choice -v test-client-2 file1
+
+

Now, read operations on the file can be done.

+
# cat file1
+xyz
+
+

Similarly, to inspect the file from the other choice, replica.split-brain-choice is to be set to test-client-3.

+

Trying to inspect the file from a wrong choice errors out.

+

To undo the split-brain-choice that has been set, the above mentioned setfattr command can be used with "none" as the value for the extended attribute.

+

Example:

+
# setfattr -n replica.split-brain-choice -v none file1
+
+

Now performing cat operation on the file will again result in input/output error, as before.

+
# cat file1
+cat: file1: Input/output error
+
+

Once the choice for resolving split-brain is made, the source brick must be set for the healing to be done. This is done using the following command:

+
# setfattr -n replica.split-brain-heal-finalize -v <heal-choice> <path-to-file>
+
+

Example

+
# setfattr -n replica.split-brain-heal-finalize -v test-client-2 file1
+
+

The above process can be used to resolve data and/or metadata split-brain on all the files.
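Putting the above steps together, a typical end-to-end sequence from the mount for a single file looks like this (file and client names are the ones from the examples above; adapt them to your volume):

# getfattr -n replica.split-brain-status file1                            # identify the type of split-brain and the choices
# setfattr -n replica.split-brain-choice -v test-client-2 file1           # inspect the copy on the first choice
# cat file1
# setfattr -n replica.split-brain-choice -v test-client-3 file1           # inspect the copy on the other choice
# cat file1
# setfattr -n replica.split-brain-choice -v none file1                    # optionally clear the choice
# setfattr -n replica.split-brain-heal-finalize -v test-client-2 file1    # pick the source; heal happens after this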

+

NOTE:

+
  1. If the "fopen-keep-cache" fuse mount option is disabled, then the inode needs to be invalidated each time before selecting a new replica.split-brain-choice to inspect a file. This can be done using:
+
# setfattr -n inode-invalidate -v 0 <path-to-file>
+
+
  2. The above-mentioned process for split-brain resolution from the mount will not work on NFS mounts as they do not provide xattr support.
+

5. Automagic unsplit-brain by [ctime|mtime|size|majority]

+

The CLI and fuse mount based resolution methods require intervention in the sense that the admin/user needs to run the commands manually. There is a cluster.favorite-child-policy volume option which, when set to one of the available policies, automatically resolves split-brains without user intervention. The default value is 'none', i.e. it is disabled.

+
# gluster volume set help | grep -A3 cluster.favorite-child-policy
+Option: cluster.favorite-child-policy
+Default Value: none
+Description: This option can be used to automatically resolve split-brains using various policies without user intervention. "size" picks the file with the biggest size as the source. "ctime" and "mtime" pick the file with the latest ctime and mtime respectively as the source. "majority" picks a file with identical mtime and size in more than half the number of bricks in the replica.
+
+

cluster.favorite-child-policy applies to all files of the volume. It is assumed that if this option is enabled with a particular policy, you do not want to examine split-brain files on a per-file basis but just want split-brain to be resolved as and when it occurs, based on the chosen policy.
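For example, to resolve future split-brains automatically in favour of the copy with the latest mtime (volume name illustrative):

# gluster volume set testvol cluster.favorite-child-policy mtime

Setting the option back to none disables automatic resolution again:

# gluster volume set testvol cluster.favorite-child-policy none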

+

Manual Split-Brain Resolution:

+

Quick Start:

+
  1. Get the path of the file that is in split-brain:

     It can be obtained either by
     a) the command gluster volume heal info split-brain, or
     b) identifying the files for which file operations performed from the client keep failing with Input/Output error.

  2. Close the applications that opened this file from the mount point. In case of VMs, they need to be powered-off.

  3. Decide on the correct copy:

     This is done by observing the afr changelog extended attributes of the file on the bricks using the getfattr command; then identifying the type of split-brain (data split-brain, metadata split-brain, entry split-brain or split-brain due to gfid-mismatch); and finally determining which of the bricks contains the 'good copy' of the file.

     getfattr -d -m . -e hex <file-path-on-brick>

     It is also possible that one brick might contain the correct data while the other might contain the correct metadata.

  4. Reset the relevant extended attribute on the brick(s) that contain the 'bad copy' of the file data/metadata using the setfattr command.

     setfattr -n <attribute-name> -v <attribute-value> <file-path-on-brick>

  5. Trigger self-heal on the file by performing a lookup from the client:

     ls -l <file-path-on-gluster-mount>

Detailed Instructions for steps 3 through 5:

+

To understand how to resolve split-brain, we need to know how to interpret the afr changelog extended attributes.

+

Execute getfattr -d -m . -e hex <file-path-on-brick>

+

Example:

+
[root@store3 ~]# getfattr -d -e hex -m. brick-a/file.txt
+#file: brick-a/file.txt
+security.selinux=0x726f6f743a6f626a6563745f723a66696c655f743a733000
+trusted.afr.vol-client-2=0x000000000000000000000000
+trusted.afr.vol-client-3=0x000000000200000000000000
+trusted.gfid=0x307a5c9efddd4e7c96e94fd4bcdcbd1b
+
+

The extended attributes of the form trusted.afr.<volname>-client-<subvolume-index> are used by afr to maintain the changelog of the file. The values of trusted.afr.<volname>-client-<subvolume-index> are calculated by the glusterfs client (fuse or nfs-server) processes. When the glusterfs client modifies a file or directory, the client contacts each brick and updates the changelog extended attribute according to the response of the brick.

+

'subvolume-index' is nothing but (brick number - 1) in the gluster volume info <volname> output.

+

Example:

+
[root@pranithk-laptop ~]# gluster volume info vol
+ Volume Name: vol
+ Type: Distributed-Replicate
+ Volume ID: 4f2d7849-fbd6-40a2-b346-d13420978a01
+ Status: Created
+ Number of Bricks: 4 x 2 = 8
+ Transport-type: tcp
+ Bricks:
+ brick-a: pranithk-laptop:/gfs/brick-a
+ brick-b: pranithk-laptop:/gfs/brick-b
+ brick-c: pranithk-laptop:/gfs/brick-c
+ brick-d: pranithk-laptop:/gfs/brick-d
+ brick-e: pranithk-laptop:/gfs/brick-e
+ brick-f: pranithk-laptop:/gfs/brick-f
+ brick-g: pranithk-laptop:/gfs/brick-g
+ brick-h: pranithk-laptop:/gfs/brick-h
+
+

In the example above:

+
Brick             |    Replica set        |    Brick subvolume index
+----------------------------------------------------------------------------
+-/gfs/brick-a     |       0               |       0
+-/gfs/brick-b     |       0               |       1
+-/gfs/brick-c     |       1               |       2
+-/gfs/brick-d     |       1               |       3
+-/gfs/brick-e     |       2               |       4
+-/gfs/brick-f     |       2               |       5
+-/gfs/brick-g     |       3               |       6
+-/gfs/brick-h     |       3               |       7
+
+

Each file in a brick maintains the changelog of itself and that of the files present in all the other bricks in its replica set, as seen by that brick.

+

In the example volume given above, all files in brick-a will have 2 entries, one for itself and the other for the file present in its replica pair, i.e. brick-b:
+trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for itself (brick-a)
+trusted.afr.vol-client-1=0x000000000000000000000000 -->changelog for brick-b as seen by brick-a

+

Likewise, all files in brick-b will have:
+trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for brick-a as seen by brick-b
+trusted.afr.vol-client-1=0x000000000000000000000000 -->changelog for itself (brick-b)

+

The same can be extended for other replica pairs.

+

Interpreting Changelog (roughly pending operation count) Value:
+Each extended attribute has a value which is 24 hexadecimal digits. The first 8 digits represent the changelog of data, the second 8 digits the changelog of metadata, and the last 8 digits the changelog of directory entries.

+

Pictorially representing the same, we have:

+
0x 000003d7 00000001 00000000
+        |      |       |
+        |      |        \_ changelog of directory entries
+        |       \_ changelog of metadata
+         \ _ changelog of data
+
+
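When inspecting many files, the three counters can be split out of a changelog value with plain shell string slicing; a minimal sketch using the value from the diagram above:

val=000003d70000000100000000    # changelog value without the leading 0x
echo "data=0x${val:0:8} metadata=0x${val:8:8} entry=0x${val:16:8}"
# prints: data=0x000003d7 metadata=0x00000001 entry=0x00000000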

For directories, metadata and entry changelogs are valid. For regular files, data and metadata changelogs are valid. For special files such as device files, only the metadata changelog is valid. When a file split-brain happens it could be either a data split-brain, a metadata split-brain, or both. When a split-brain happens, the changelog of the file would look something like this:

+

Example: (Let's consider both data and metadata split-brain on the same file.)

+
[root@pranithk-laptop vol]# getfattr -d -m . -e hex /gfs/brick-?/a
+getfattr: Removing leading '/' from absolute path names
+#file: gfs/brick-a/a
+trusted.afr.vol-client-0=0x000000000000000000000000
+trusted.afr.vol-client-1=0x000003d70000000100000000
+trusted.gfid=0x80acdbd886524f6fbefa21fc356fed57
+#file: gfs/brick-b/a
+trusted.afr.vol-client-0=0x000003b00000000100000000
+trusted.afr.vol-client-1=0x000000000000000000000000
+trusted.gfid=0x80acdbd886524f6fbefa21fc356fed57
+
+

Observations:

+

According to changelog extended attributes on file /gfs/brick-a/a:

+

The first 8 digits of trusted.afr.vol-client-0 are all zeros (0x00000000................), and the first 8 digits of trusted.afr.vol-client-1 are not all zeros (0x000003d7................). So the changelog on /gfs/brick-a/a implies that some data operations succeeded on itself but failed on /gfs/brick-b/a.

+

The second 8 digits of trusted.afr.vol-client-0 are all zeros (0x........00000000........), and the second 8 digits of trusted.afr.vol-client-1 are not all zeros (0x........00000001........). So the changelog on /gfs/brick-a/a implies that some metadata operations succeeded on itself but failed on /gfs/brick-b/a.

+

According to Changelog extended attributes on file /gfs/brick-b/a:

+

The first 8 digits of trusted.afr.vol-client-0 are not all zeros (0x000003b0................), and the first 8 digits of trusted.afr.vol-client-1 are all zeros (0x00000000................). So the changelog on /gfs/brick-b/a implies that some data operations succeeded on itself but failed on /gfs/brick-a/a.

+

The second 8 digits of trusted.afr.vol-client-0 are not all zeros (0x........00000001........), and the second 8 digits of trusted.afr.vol-client-1 are all zeros (0x........00000000........). So the changelog on /gfs/brick-b/a implies that some metadata operations succeeded on itself but failed on /gfs/brick-a/a.

+

Since both copies have data and metadata changes that are not on the other file, the file is in both data and metadata split-brain.

+

Deciding on the correct copy:

+

The user may have to inspect the stat and getfattr output of the files to decide which metadata to retain, and the contents of the file to decide which data to retain. Continuing with the example above, let's say we want to retain the data of /gfs/brick-a/a and the metadata of /gfs/brick-b/a.

+

Resetting the relevant changelogs to resolve the split-brain:

+

For resolving data-split-brain:

+

We need to change the changelog extended attributes on the files as if some data operations succeeded on /gfs/brick-a/a but failed on /gfs/brick-b/a. But /gfs/brick-b/a should NOT have any changelog which says some data operations succeeded on /gfs/brick-b/a but failed on /gfs/brick-a/a. We need to reset the data part of the changelog on trusted.afr.vol-client-0 of /gfs/brick-b/a.

+

For resolving metadata-split-brain:

+

We need to change the changelog extended attributes on the files as if some metadata operations succeeded on /gfs/brick-b/a but failed on /gfs/brick-a/a. But /gfs/brick-a/a should NOT have any changelog which says some metadata operations succeeded on /gfs/brick-a/a but failed on /gfs/brick-b/a. We need to reset the metadata part of the changelog on trusted.afr.vol-client-1 of /gfs/brick-a/a.

+

So, the intended changes are:

On /gfs/brick-b/a, for trusted.afr.vol-client-0:
change 0x000003b00000000100000000 to 0x000000000000000100000000
(note that the metadata part is still not all zeros). Hence execute:

setfattr -n trusted.afr.vol-client-0 -v 0x000000000000000100000000 /gfs/brick-b/a

+

On /gfs/brick-a/a, for trusted.afr.vol-client-1:
change 0x000003d70000000100000000 to 0x000003d70000000000000000
(note that the data part is still not all zeros). Hence execute:

setfattr -n trusted.afr.vol-client-1 -v 0x000003d70000000000000000 /gfs/brick-a/a

+

Thus after the above operations are done, the changelogs look like this:

+
[root@pranithk-laptop vol]# getfattr -d -m . -e hex /gfs/brick-?/a
+getfattr: Removing leading '/' from absolute path names
+#file: gfs/brick-a/a
+trusted.afr.vol-client-0=0x000000000000000000000000
+trusted.afr.vol-client-1=0x000003d70000000000000000
+trusted.gfid=0x80acdbd886524f6fbefa21fc356fed57
+
+#file: gfs/brick-b/a
+trusted.afr.vol-client-0=0x000000000000000100000000
+trusted.afr.vol-client-1=0x000000000000000000000000
+trusted.gfid=0x80acdbd886524f6fbefa21fc356fed57
+
+

Triggering Self-heal:

+

Perform ls -l <file-path-on-gluster-mount> to trigger healing.
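For the example above, the trigger and a quick verification could look like this (mount path illustrative):

# ls -l /mnt/glustervol/a
# getfattr -d -m . -e hex /gfs/brick-?/a    # after the heal completes, all trusted.afr.* changelogs should be all zeros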

+

+Fixing Directory entry split-brain:

+
+

Afr has the ability to conservatively merge different entries in a directory when there is a split-brain on the directory. If on one brick directory 'd' has entries '1' and '2', and it has entries '3' and '4' on the other brick, then afr will merge all of the entries so that the directory contains '1', '2', '3' and '4' on both bricks. (Note: this may result in deleted files re-appearing if the split-brain happened because of deletion of files in the directory.) Split-brain resolution needs human intervention when there is at least one entry which has the same file name but a different gfid in that directory.

Example: On brick-a the directory has entries '1' (with gfid g1) and '2', and on brick-b the directory has entries '1' (with gfid g2) and '3'. These kinds of directory split-brains need human intervention to resolve. The user needs to remove either the file '1' on brick-a or the file '1' on brick-b to resolve the split-brain. In addition, the corresponding gfid-link file also needs to be removed. The gfid-link files are present in the .glusterfs folder in the top-level directory of the brick. If the gfid of the file is 0x307a5c9efddd4e7c96e94fd4bcdcbd1b (the trusted.gfid extended attribute obtained from the getfattr command earlier), the gfid-link file can be found at

+
+

/gfs/brick-a/.glusterfs/30/7a/307a5c9efddd4e7c96e94fd4bcdcbd1b

+
+

Word of caution:

+

Before deleting the gfid-link, we have to ensure that there are no hard links to the file present on that brick. If hard links exist, they must be deleted as well.


Statedump

+

A statedump is, as the name suggests, a dump of the internal state of a glusterfs process. It captures information about in-memory structures such as frames, call stacks, active inodes, fds, mempools, iobufs, and locks as well as xlator specific data structures. This can be an invaluable tool for debugging memory leaks and hung processes.

+ +
+

Generate a Statedump

+

Run the command

+
gluster --print-statedumpdir
+
+

on a gluster server node to find out which directory the statedumps will be created in. This directory may need to be created if not already present. For the rest of this document, we will refer to this directory as statedump-directory.
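For example (the path shown is a common default; yours may differ):

# gluster --print-statedumpdir
/var/run/gluster
# mkdir -p /var/run/gluster    # create it if it does not already exist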

+

To generate a statedump for a process, run

+
kill -USR1 <pid-of-gluster-process>
+
+

For client mounts:

+

Run the following command on the client system

+
kill -USR1 <pid-of-gluster-mount-process>
+
+

There are specific commands to generate statedumps for all brick processes/nfs server/quotad which can be used instead of the above. Run the following commands on one of the server nodes:

+

For bricks:

+
gluster volume statedump <volname>
+
+

For the NFS server:

+
gluster volume statedump <volname> nfs
+
+

For quotad:

+
gluster volume statedump <volname> quotad
+
+

The statedumps will be created in statedump-directory on each node. The statedumps for brick processes will be created with the filename hyphenated-brick-path.<pid>.dump.timestamp while for all other processes it will be glusterdump.<pid>.dump.timestamp.
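For example, after taking dumps, the statedump-directory might contain files like these (names hypothetical, following the convention above):

# ls /var/run/gluster
bricks-brick1.1234.dump.1405493251
glusterdump.5678.dump.1405493251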

+
+

Read a Statedump

+

Statedumps are text files and can be opened in any text editor. The first and last lines of the file contain the start and end time (in UTC), respectively, of when the statedump file was written.

+

Mallinfo

+

The mallinfo return status is printed in the following format. Please read man mallinfo for more information about what each field means.

+
[mallinfo]
+mallinfo_arena=100020224    /* Non-mmapped space allocated (bytes) */
+mallinfo_ordblks=69467      /* Number of free chunks */
+mallinfo_smblks=449         /* Number of free fastbin blocks */
+mallinfo_hblks=13           /* Number of mmapped regions */
+mallinfo_hblkhd=20144128    /* Space allocated in mmapped regions (bytes) */
+mallinfo_usmblks=0          /* Maximum total allocated space (bytes) */
+mallinfo_fsmblks=39264      /* Space in freed fastbin blocks (bytes) */
+mallinfo_uordblks=96710112  /* Total allocated space (bytes) */
+mallinfo_fordblks=3310112   /* Total free space (bytes) */
+mallinfo_keepcost=133712    /* Top-most, releasable space (bytes) */
+
+

Memory accounting stats

+

Each xlator defines data structures specific to its requirements. The statedump captures information about the memory usage and allocations of these structures for each xlator in the call-stack and prints them in the following format:

+

For the xlator with the name glusterfs

+
[global.glusterfs - Memory usage]   #[global.<xlator-name> - Memory usage]
+num_types=119                       #The number of data types it is using
+
+

followed by the memory usage for each data-type for that translator. The following example displays a sample for the gf_common_mt_gf_timer_t type

+
[global.glusterfs - usage-type gf_common_mt_gf_timer_t memusage]
+#[global.<xlator-name> - usage-type <tag associated with the data-type> memusage]
+size=112          #Total size allocated for data-type when the statedump was taken i.e. num_allocs * sizeof (data-type)
+num_allocs=2      #Number of allocations of the data-type which are active at the time of taking the statedump.
+max_size=168      #max_num_allocs times the sizeof(data-type) i.e. max_num_allocs * sizeof (data-type)
+max_num_allocs=3  #Maximum number of active allocations at any point in the life of the process.
+total_allocs=7    #Number of times this data-type was allocated in the life of the process.
+
+

This information is useful while debugging high memory usage issues as steadily increasing values for num_allocs may indicate a memory leak for that data-type.
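One way to compare a suspect data type across two statedumps taken at different times (file names hypothetical):

# grep -A4 'usage-type gf_common_mt_char memusage' glusterdump.5225.dump.1405490000
# grep -A4 'usage-type gf_common_mt_char memusage' glusterdump.5225.dump.1405493251

If num_allocs keeps growing between dumps while the workload is steady, that data type is a leak suspect.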

+

Mempools

+

Mempools are an optimization intended to reduce the number of allocations of a data type. By creating a mempool of 1024 elements for a data-type, new elements of that type will be allocated from the heap using allocator calls like calloc only if all the 1024 elements in the pool are in active use.

+

Memory pool allocations by each xlator are displayed in the following format:

+
[mempool] #Section name
+-----=-----
+pool-name=fuse:fd_t #pool-name=<xlator-name>:<data-type>
+hot-count=1         #number of mempool elements in active use. i.e. for this pool it is the number of 'fd_t' elements in active use.
+cold-count=1023     #number of mempool elements that are not in use. New allocation requests will be served from here until all the elements in the pool are in use i.e. cold-count becomes 0.
+padded_sizeof=108   #Element size including padding. Each mempool element is padded with a doubly-linked-list + ptr of mempool + is-in-use info to operate the pool of elements
+pool-misses=0       #Number of times the element was allocated from heap because all elements from the pool were in active use.
+alloc-count=314     #Number of times this type of data was allocated through out the life of this process. This may include pool-misses as well.
+max-alloc=3         #Maximum number of elements from the pool in active use at any point in the life of the process. This does *not* include pool-misses.
+cur-stdalloc=0      #Number of allocations made from heap that are yet to be released via mem_put().
+max-stdalloc=0      #Maximum number of allocations from heap that were in active use at any point in the life of the process.
+
+

This information is also useful while debugging high memory usage issues as large hot_count and cur-stdalloc values may point to an element not being freed after it has been used.

+

Iobufs

+
[iobuf.global]
+iobuf_pool=0x1f0d970                #The memory pool for iobufs
+iobuf_pool.default_page_size=131072 #The default size of iobuf (if no iobuf size is specified the default size is allocated)
+#iobuf_arena: One arena represents a group of iobufs of a particular size
+iobuf_pool.arena_size=12976128       # The initial size of the iobuf pool (doesn't include the stdalloc'd memory or newly added arenas)
+iobuf_pool.arena_cnt=8               #Total number of arenas in the pool
+iobuf_pool.request_misses=0          #The number of iobufs that were stdalloc'd (as they exceeded the default max page size provided by iobuf_pool).
+
+

There are 3 lists of arenas

+
  1. Arena list: arenas allocated during iobuf pool creation and arenas that are in use (active_cnt != 0) are part of this list.
  2. Purge list: arenas that can be purged (no active iobufs, active_cnt == 0).
  3. Filled list: arenas without free iobufs.
+
[purge.1]                        #purge.<S.No.>
+purge.1.mem_base=0x7fc47b35f000  #The address of the arena structure
+purge.1.active_cnt=0             #The number of iobufs active in that arena
+purge.1.passive_cnt=1024         #The number of unused iobufs in the arena
+purge.1.alloc_cnt=22853          #Total allocs in this pool(number of times the iobuf was allocated from this arena)
+purge.1.max_active=7             #Max active iobufs from this arena, at any point in the life of this process.
+purge.1.page_size=128            #Size of all the iobufs in this arena.
+
+[arena.5] #arena.<S.No.>
+arena.5.mem_base=0x7fc47af1f000
+arena.5.active_cnt=0
+arena.5.passive_cnt=64
+arena.5.alloc_cnt=0
+arena.5.max_active=0
+arena.5.page_size=32768
+
+

If the active_cnt of any arena is non zero, then the statedump will also have the iobuf list.

+
[arena.6.active_iobuf.1]                  #arena.<S.No>.active_iobuf.<iobuf.S.No.>
+arena.6.active_iobuf.1.ref=1              #refcount of the iobuf
+arena.6.active_iobuf.1.ptr=0x7fdb921a9000 #address of the iobuf
+
+[arena.6.active_iobuf.2]
+arena.6.active_iobuf.2.ref=1
+arena.6.active_iobuf.2.ptr=0x7fdb92189000
+
+

A lot of filled arenas at any given point in time could be a sign of iobuf leaks.

+

Call stack

+

The fops received by gluster are handled using call stacks. A call stack contains information about the uid/gid/pid etc of the process that is executing the fop. Each call stack contains different call-frames for each xlator which handles that fop.

+
[global.callpool.stack.3]    #global.callpool.stack.<Serial-Number>
+stack=0x7fc47a44bbe0         #Stack address
+uid=0                        #Uid of the process executing the fop
+gid=0                        #Gid of the process executing the fop
+pid=6223                     #Pid of the process executing the fop
+unique=2778                  #Some Xlators like afr do copy_frame and perform the operation in a different stack. This id is used to determine the stacks that are inter-related because of copy-frame
+lk-owner=0000000000000000    #Some of the fuse fops have lk-owner.
+op=LOOKUP                    #Fop
+type=1                       #Type of the op i.e. FOP/MGMT-OP
+cnt=9                        #Number of frames in this stack.
+
+

Call-frame

+

Each frame will have information about which xlator the frame belongs to, which function it wound to/from and which it will be unwound to, and whether it has unwound.

+
[global.callpool.stack.3.frame.2] #global.callpool.stack.<stack-serial-number>.frame.<frame-serial-number>
+frame=0x7fc47a611dbc              #Frame address
+ref_count=0                       #Incremented at the time of wind and decremented at the time of unwind.
+translator=r2-client-1            #Xlator this frame belongs to
+complete=0                        #1 if this frame is already unwound. 0 if it is yet to unwind.
+parent=r2-replicate-0             #Parent xlator of this frame
+wind_from=afr_lookup              #Parent xlator function from which it was wound
+wind_to=priv->children[i]->fops->lookup
+unwind_to=afr_lookup_cbk          #Parent xlator function to unwind to
+
+

To debug hangs in the system, see which xlator has not yet unwound its fop by checking the value of the complete tag in the statedump. (complete=0 indicates the xlator has not yet unwound).
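A quick way to list the frames that have not yet unwound, together with the xlator they belong to (statedump file name hypothetical):

# grep -B4 'complete=0' glusterdump.6223.dump.1405493251 | grep -E 'translator|complete'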

+

FUSE Operation History

+

Gluster Fuse maintains a history of the operations that it has performed.

+
[xlator.mount.fuse.history]
+TIME=2014-07-09 16:44:57.523364
+message=[0] fuse_release: RELEASE(): 4590:, fd: 0x1fef0d8, gfid: 3afb4968-5100-478d-91e9-76264e634c9f
+
+TIME=2014-07-09 16:44:57.523373
+message=[0] send_fuse_err: Sending Success for operation 18 on inode 3afb4968-5100-478d-91e9-76264e634c9f
+
+TIME=2014-07-09 16:44:57.523394
+message=[0] fuse_getattr_resume: 4591, STAT, path: (/iozone.tmp), gfid: (3afb4968-5100-478d-91e9-76264e634c9f)
+
+

Xlator configuration

+
[cluster/replicate.r2-replicate-0] #Xlator type, name information
+child_count=2                      #Number of children for the xlator
+#Xlator specific configuration below
+child_up[0]=1
+pending_key[0]=trusted.afr.r2-client-0
+child_up[1]=1
+pending_key[1]=trusted.afr.r2-client-1
+data_self_heal=on
+metadata_self_heal=1
+entry_self_heal=1
+data_change_log=1
+metadata_change_log=1
+entry-change_log=1
+read_child=1
+favorite_child=-1
+wait_count=1
+
+

Graph/inode table

+
[active graph - 1]
+
+conn.1.bound_xl./data/brick01a/homegfs.hashsize=14057
+conn.1.bound_xl./data/brick01a/homegfs.name=/data/brick01a/homegfs/inode
+conn.1.bound_xl./data/brick01a/homegfs.lru_limit=16384 #Least recently used size limit
+conn.1.bound_xl./data/brick01a/homegfs.active_size=690 #Number of inodes undergoing some kind of fop ie., on which there is at least one ref.
+conn.1.bound_xl./data/brick01a/homegfs.lru_size=183    #Number of inodes present in lru list
+conn.1.bound_xl./data/brick01a/homegfs.purge_size=0    #Number of inodes present in purge list
+
+

Inode

+
[conn.1.bound_xl./data/brick01a/homegfs.active.324] #324th inode in active inode list
+gfid=e6d337cf-97eb-44b3-9492-379ba3f6ad42           #Gfid of the inode
+nlookup=13                                          #Number of times lookups happened from the client or from fuse kernel
+fd-count=4                                          #Number of fds opened on the inode
+ref=11                                              #Number of refs taken on the inode
+ia_type=1                                           #Type of the inode. This should be changed to some string :-(
+
+[conn.1.bound_xl./data/brick01a/homegfs.lru.1] #1st inode in lru list. Note that ref count is zero for these inodes.
+gfid=5114574e-69bc-412b-9e52-f13ff087c6fc
+nlookup=5
+fd-count=0
+ref=0
+ia_type=2
+
+

Inode context

+

Each xlator can store information specific to it in the inode context. This context can also be printed in the statedump. Here is the inode context of the locks xlator

+
[xlator.features.locks.homegfs-locks.inode]
+path=/homegfs/users/dfrobins/gfstest/r4/SCRATCH/fort.5102 - path of the file
+mandatory=0
+inodelk-count=5 #Number of inode locks
+lock-dump.domain.domain=homegfs-replicate-0:self-heal #Domain on which the lock was taken. In this case, this domain is used by the selfheal to prevent more than one heal on the same file
+inodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=0, len=0, pid = 18446744073709551615, owner=080b1ada117f0000, client=0xb7fc30, connection-id=compute-30-029.com-3505-2014/06/29-14:46:12:477358-homegfs-client-0-0-1, granted at Sun Jun 29 11:01:00 2014 #Active lock information
+
+inodelk.inodelk[1](BLOCKED)=type=WRITE, whence=0, start=0, len=0, pid = 18446744073709551615, owner=c0cb091a277f0000, client=0xad4f10, connection-id=gfs01a.com-4080-2014/06/29-14:41:36:917768-homegfs-client-0-0-0, blocked at Sun Jun 29 11:04:44 2014 #Blocked lock information
+
+lock-dump.domain.domain=homegfs-replicate-0:metadata #Domain name where metadata operations take locks to maintain replication consistency
+lock-dump.domain.domain=homegfs-replicate-0 #Domain name where entry/data operations take locks to maintain replication consistency
+inodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=11141120, len=131072, pid = 18446744073709551615, owner=080b1ada117f0000, client=0xb7fc30, connection-id=compute-30-029.com-3505-2014/06/29-14:46:12:477358-homegfs-client-0-0-1, granted at Sun Jun 29 11:10:36 2014 #Active lock information
+
+
+

Debug With Statedumps

+

Memory leaks

+

Statedumps can be used to determine whether the high memory usage of a process is caused by a leak. To debug the issue, generate statedumps for that process at regular intervals, or before and after running the steps that cause the memory used to increase. Once you have multiple statedumps, compare the memory allocation stats to see if any of them are increasing steadily as those could indicate a potential memory leak.

+

The following examples walk through using statedumps to debug two different memory leaks.

+

With the memory accounting feature:

+

BZ 1120151 reported high memory usage by the self heal daemon whenever one of the bricks was wiped in a replicate volume and a full self-heal was invoked to heal the contents. This issue was debugged using statedumps to determine which data-structure was leaking memory.

+

A statedump of the self heal daemon process was taken using

+
kill -USR1 `<pid-of-gluster-self-heal-daemon>`
+
+

On examining the statedump:

+
grep -w num_allocs glusterdump.5225.dump.1405493251
+num_allocs=77078
+num_allocs=87070
+num_allocs=117376
+....
+
+grep hot-count glusterdump.5225.dump.1405493251
+hot-count=16384
+hot-count=16384
+hot-count=4095
+....
+
+

On searching for num_allocs with high values in the statedump, a grep of the statedump revealed a large number of allocations for the following data-types under the replicate xlator:

+
  1. gf_common_mt_asprintf
  2. gf_common_mt_char
  3. gf_common_mt_mem_pool
+

On checking the afr code for allocations with tag gf_common_mt_char, it was found that the data-self-heal code path does not free one such allocated data structure. gf_common_mt_mem_pool suggests that there is a leak in pool memory. The replicate-0:dict_t, glusterfs:data_t and glusterfs:data_pair_t pools are using a lot of memory, i.e. cold_count is 0 and there are too many allocations. Checking the source code of dict.c shows that the key in a dict is allocated with gf_common_mt_char (i.e. tag 2 above) and the value is created using gf_asprintf, which in turn uses gf_common_mt_asprintf (i.e. tag 1 above). Checking the code for leaks in self-heal code paths led to a line which over-writes a variable with a new dictionary even when it was already holding a reference to another dictionary. After fixing these leaks, we ran the same test to verify that none of the num_allocs values increased in the statedump of the self-heal daemon after healing 10,000 files. Please check http://review.gluster.org/8316 for more info about the patch/code.

+

Leaks in mempools:

+

The statedump output of mempools was used to test and verify the fixes for BZ 1134221. On code analysis, dict_t objects were found to be leaking (due to missing unref's) during name self-heal.

+

Glusterfs was compiled with the -DDEBUG flags to have cold count set to 0 by default. The test involved creating 100 files on plain replicate volume, removing them from one of the backend bricks, and then triggering lookups on them from the mount point. A statedump of the mount process was taken before executing the test case and after it was completed.

+

Statedump output of the fuse mount process before the test case was executed:

+
pool-name=glusterfs:dict_t
+hot-count=0
+cold-count=0
+padded_sizeof=140
+alloc-count=33
+max-alloc=0
+pool-misses=33
+cur-stdalloc=14
+max-stdalloc=18
+
+

Statedump output of the fuse mount process after the test case was executed:

+
pool-name=glusterfs:dict_t
+hot-count=0
+cold-count=0
+padded_sizeof=140
+alloc-count=2841
+max-alloc=0
+pool-misses=2841
+cur-stdalloc=214
+max-stdalloc=220
+
+

Here, as cold count was 0 by default, cur-stdalloc indicates the number of dict_t objects that were allocated from the heap using mem_get(), and are yet to be freed using mem_put(). After running the test case (named selfheal of 100 files), there was a rise in the cur-stdalloc value (from 14 to 214) for dict_t.

+

After the leaks were fixed, glusterfs was again compiled with -DDEBUG flags and the steps were repeated. Statedumps of the FUSE mount were taken before and after executing the test case to ascertain the validity of the fix. And the results were as follows:

+

Statedump output of the fuse mount process before executing the test case:

+
pool-name=glusterfs:dict_t
+hot-count=0
+cold-count=0
+padded_sizeof=140
+alloc-count=33
+max-alloc=0
+pool-misses=33
+cur-stdalloc=14
+max-stdalloc=18
+
+

Statedump output of the fuse mount process after executing the test case:

+
pool-name=glusterfs:dict_t
+hot-count=0
+cold-count=0
+padded_sizeof=140
+alloc-count=2837
+max-alloc=0
+pool-misses=2837
+cur-stdalloc=14
+max-stdalloc=119
+
+

The value of cur-stdalloc remained 14 after the test, indicating that the fix indeed does what it's supposed to do.

+

Hangs caused by frame loss

+

BZ 994959 reported that the Fuse mount hangs on a readdirp operation. Here are the steps used to locate the cause of the hang using statedumps.

+

Statedumps were taken for all gluster processes after reproducing the issue. The following stack was seen in the FUSE mount's statedump:

+
[global.callpool.stack.1.frame.1]
+ref_count=1
+translator=fuse
+complete=0
+
+[global.callpool.stack.1.frame.2]
+ref_count=0
+translator=r2-client-1
+complete=1 <<----- Client xlator has completed the readdirp call and unwound to afr
+parent=r2-replicate-0
+wind_from=afr_do_readdir
+wind_to=children[call_child]->fops->readdirp
+unwind_from=client3_3_readdirp_cbk
+unwind_to=afr_readdirp_cbk
+
+[global.callpool.stack.1.frame.3]
+ref_count=0
+translator=r2-replicate-0
+complete=0 <<---- But the Afr xlator is not unwinding for some reason.
+parent=r2-dht
+wind_from=dht_do_readdir
+wind_to=xvol->fops->readdirp
+unwind_to=dht_readdirp_cbk
+
+[global.callpool.stack.1.frame.4]
+ref_count=1
+translator=r2-dht
+complete=0
+parent=r2-io-cache
+wind_from=ioc_readdirp
+wind_to=FIRST_CHILD(this)->fops->readdirp
+unwind_to=ioc_readdirp_cbk
+
+[global.callpool.stack.1.frame.5]
+ref_count=1
+translator=r2-io-cache
+complete=0
+parent=r2-quick-read
+wind_from=qr_readdirp
+wind_to=FIRST_CHILD (this)->fops->readdirp
+unwind_to=qr_readdirp_cbk
+
+

unwind_to shows that the call was unwound to afr_readdirp_cbk from the r2-client-1 xlator. Inspecting that function revealed that afr was not unwinding the stack when the fop failed. Check http://review.gluster.org/5531 for more info about the patch/code changes.


Troubleshooting Self-heal

+ +

The first level of analysis always starts with looking at the log files. Which ones, you ask?

+
  • /var/log/glusterfs/$fuse-mount-point.log –> Fuse client log
  • /var/log/glusterfs/glfsheal-$volname.log –> This is the log file to look at when you run the heal info/split-brain resolution commands.
  • /var/log/glusterfs/glustershd.log –> This is the self-heal daemon log that prints the names of files undergoing heal, the sources and sinks for each file, etc. It is common for all volumes.
  • /var/log/glusterfs/bricks/$brick.log –> Some errors in clients are simply propagated from the bricks themselves, so correlating client log errors with the logs from the brick is necessary.
+

Sometimes, you might need more verbose logging to figure out what’s going on:

gluster volume set $volname client-log-level $LEVEL

+

where LEVEL can be any one of DEBUG, WARNING, ERROR, INFO, CRITICAL, NONE, TRACE. This should ideally make all the log files mentioned above start logging at $LEVEL. The default is INFO, but you can temporarily toggle it to DEBUG or TRACE if you want to see under-the-hood messages. This is useful when the normal logs don’t give a clue as to what is happening.
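For example, to temporarily bump the client log level for a volume and then put it back (volume name illustrative):

# gluster volume set testvol client-log-level DEBUG
# ...reproduce the problem and collect the logs...
# gluster volume set testvol client-log-level INFO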

+ +

Most issues I’ve seen on the mailing list and with customers can broadly fit into the following buckets:

+

(Note: Not discussing split-brains here. If they occur, you need to use split-brain resolution CLI or cluster.favorite-child-policy options to fix them. They usually occur in replica 2 volumes and can be prevented by using replica 3 or arbiter volumes.)

+

i) Heal info appears to hang/takes a long time to complete

+

If the number of entries is large, then heal info will take longer than usual. While there are performance improvements to heal info being planned, a faster way to get an approximate count of the pending entries is to use the gluster volume heal $VOLNAME statistics heal-count command.

+

Knowledge Hack: Since we know that during the write transaction, the xattrop folder will capture the gfid-string of the file if it needs heal, we can also do an ls /brick/.glusterfs/indices/xattrop|wc -l on each brick to get the approximate number of entries that need heal. If this number reduces over time, it is a sign that the heal backlog is reducing. You will also see messages whenever a particular type of heal starts/ends for a given gfid, like so:

+
[2019-05-07 12:05:14.460442] I [MSGID: 108026] [afr-self-heal-entry.c:883:afr_selfheal_entry_do] 0-testvol-replicate-0: performing entry selfheal on d120c0cf-6e87-454b-965b-0d83a4c752bb
+
+[2019-05-07 12:05:14.474710] I [MSGID: 108026] [afr-self-heal-common.c:1741:afr_log_selfheal] 0-testvol-replicate-0: Completed entry selfheal on d120c0cf-6e87-454b-965b-0d83a4c752bb. sources=[0] 2 sinks=1
+
+[2019-05-07 12:05:14.493506] I [MSGID: 108026] [afr-self-heal-common.c:1741:afr_log_selfheal] 0-testvol-replicate-0: Completed data selfheal on a9b5f183-21eb-4fb3-a342-287d3a7dddc5. sources=[0] 2 sinks=1
+
+[2019-05-07 12:05:14.494577] I [MSGID: 108026] [afr-self-heal-metadata.c:52:__afr_selfheal_metadata_do] 0-testvol-replicate-0: performing metadata selfheal on a9b5f183-21eb-4fb3-a342-287d3a7dddc5
+
+[2019-05-07 12:05:14.498398] I [MSGID: 108026] [afr-self-heal-common.c:1741:afr_log_selfheal] 0-testvol-replicate-0: Completed metadata selfheal on a9b5f183-21eb-4fb3-a342-287d3a7dddc5. sources=[0] 2 sinks=1
+
+
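Continuing the hack above, a quick per-brick count of pending entries could look like this (brick paths illustrative):

# for b in /bricks/brick{1..3}; do echo -n "$b: "; ls $b/.glusterfs/indices/xattrop | wc -l; done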

ii) Self-heal is stuck/ not getting completed.

+

If a file seems to be forever appearing in heal info and not healing, check the following:

+
  • Examine the afr xattrs – do they clearly indicate the good and bad copies? If there isn’t at least one good copy, then the file is in split-brain and you would need to use the split-brain resolution CLI.
  • Identify which node’s shds would be picking up the file for heal. If a file is listed in the heal info output under brick1 and brick2, then the shds on the nodes which host those bricks would attempt (and one of them would succeed) in doing the heal.
  • Once the shd is identified, look at the shd logs to see if it is indeed connected to the bricks.
+

This is good:

+
[2019-05-07 09:53:02.912923] I [MSGID: 114046] [client-handshake.c:1106:client_setvolume_cbk] 0-testvol-client-2: Connected to testvol-client-2, attached to remote volume '/bricks/brick3'
+
+

This indicates a disconnect:

+
[2019-05-07 11:44:47.602862] I [MSGID: 114018] [client.c:2334:client_rpc_notify] 0-testvol-client-2: disconnected from testvol-client-2. Client process will keep trying to connect to glusterd until brick's port is available
+
+[2019-05-07 11:44:50.953516] E [MSGID: 114058] [client-handshake.c:1456:client_query_portmap_cbk] 0-testvol-client-2: failed to get the port number for remote subvolume. Please run 'gluster volume status' on server to see if brick process is running.
+
+

Alternatively, take a statedump of the self-heal daemon (shd) and check if all client xlators are connected to the respective bricks. The shd must have connected=1 for all the client xlators, meaning it can talk to all the bricks.

Shd’s statedump entry of a client xlator that is connected to the 3rd brick:

[xlator.protocol.client.testvol-client-2.priv]
connected=1
total_bytes_read=75004
ping_timeout=42
total_bytes_written=50608
ping_msgs_sent=0
msgs_sent=0

Shd’s statedump entry of the same client xlator if it is disconnected from the 3rd brick:

[xlator.protocol.client.testvol-client-2.priv]
connected=0
total_bytes_read=75004
ping_timeout=42
total_bytes_written=50608
ping_msgs_sent=0
msgs_sent=0
+

If there are connection issues (i.e. connected=0), you would need to investigate and fix them. Check if the pid and the TCP/RDMA port of the brick process from gluster volume status $VOLNAME match those of ps aux|grep glusterfsd|grep $brick-path.

+
# gluster volume status
+Status of volume: testvol
+Gluster process TCP Port RDMA Port Online Pid
+
+---
+
+Brick 127.0.0.2:/bricks/brick1 49152 0 Y 12527
+
+
# ps aux|grep brick1
+
+root 12527 0.0 0.1 1459208 20104 ? Ssl 11:20 0:01 /usr/local/sbin/glusterfsd -s 127.0.0.2 --volfile-id testvol.127.0.0.2.bricks-brick1 -p /var/run/gluster/vols/testvol/127.0.0.2-bricks-brick1.pid -S /var/run/gluster/70529980362a17d6.socket --brick-name /bricks/brick1 -l /var/log/glusterfs/bricks/bricks-brick1.log --xlator-option *-posix.glusterd-uuid=d90b1532-30e5-4f9d-a75b-3ebb1c3682d4 --process-name brick --brick-port 49152 --xlator-option testvol-server.listen-port=49152
+
+

Though this will likely match, sometimes there could be a bug leading to stale port usage. A quick workaround would be to restart glusterd on that node and check if things match. Report the issue to the devs if you see this problem.

+
  • I have seen some cases where a file is listed in heal info, and the afr xattrs indicate pending metadata or data heal, but the file itself is not present on all bricks. Ideally, the parent directory of the file must have pending entry heal xattrs so that the file either gets created on the missing bricks or gets deleted from the ones where it is present. But if the parent dir doesn’t have those xattrs, the entry heal can’t proceed. In such cases, you can:

      • either do a lookup directly on the file from the mount so that name heal is triggered and then shd can pick up the data/metadata heal, or
      • manually set entry xattrs on the parent dir to emulate an entry heal so that the file gets created as a part of it.
      • If a brick’s underlying filesystem/LVM was damaged and fsck’d to recover, some files/dirs might be missing on it. If there is a lot of missing info on the recovered bricks, it might be better to just do a replace-brick or reset-brick and let the heal fully sync everything rather than fiddling with the afr xattrs of individual entries.
+

Hack: How to trigger heal on any file/directory

Knowing about self-heal logic and index heal from the previous post, we can sort of emulate a heal with the following steps. This is not something that you should be doing on your cluster, but it pays to at least know that it is possible when push comes to shove.

+
  1. Pick one brick as good and set the afr pending xattrs on it, blaming the bad bricks.
  2. Capture the gfid inside .glusterfs/indices/xattrop so that the shd can pick it up during index heal.
  3. Finally, trigger index heal: gluster volume heal $VOLNAME.
+

Example: Let us say a FILE-1 exists with trusted.gfid=0x1ad2144928124da9b7117d27393fea5c on all bricks of a replica 3 volume called testvol. It has no afr xattrs. But you still need to emulate a heal. Let us say you choose brick-2 as the source. Let us do the steps listed above:

+
  1. Make brick-2 blame the other 2 bricks:

     setfattr -n trusted.afr.testvol-client-2 -v 0x000000010000000000000000 /bricks/brick2/FILE-1
     setfattr -n trusted.afr.testvol-client-1 -v 0x000000010000000000000000 /bricks/brick2/FILE-1

  2. Store the gfid string inside the xattrop folder as a hard link to the base entry:

     # cd /bricks/brick2/.glusterfs/indices/xattrop/
     # ls -li
     total 0
     17829255 ----------. 1 root root 0 May 10 11:20 xattrop-a400ca91-cec9-4463-a183-aca9eaff9fa7

     # ln xattrop-a400ca91-cec9-4463-a183-aca9eaff9fa7 1ad21449-2812-4da9-b711-7d27393fea5c
     # ll
     total 0
     ----------. 2 root root 0 May 10 11:20 1ad21449-2812-4da9-b711-7d27393fea5c
     ----------. 2 root root 0 May 10 11:20 xattrop-a400ca91-cec9-4463-a183-aca9eaff9fa7

  3. Trigger heal: gluster volume heal testvol

     The glustershd.log of node-2 should log about the heal:

     [2019-05-10 06:10:46.027238] I [MSGID: 108026] [afr-self-heal-common.c:1741:afr_log_selfheal] 0-testvol-replicate-0: Completed data selfheal on 1ad21449-2812-4da9-b711-7d27393fea5c. sources=[1] sinks=0 2

     So the data was healed from the second brick to the first and third bricks.

iii) Self-heal is too slow

+

If the heal backlog is decreasing and you see glustershd logging heals but you’re not happy with the rate of healing, then you can play around with shd-max-threads and shd-wait-qlength volume options.

+
Option: cluster.shd-max-threads
+Default Value: 1
+Description: Maximum number of parallel heals SHD can do per local brick. This can substantially lower heal times, but can also crush your bricks if you don’t have the storage hardware to support this.
+
+Option: cluster.shd-wait-qlength
+Default Value: 1024
+Description: This option can be used to control number of heals that can wait in SHD per subvolume
+
+
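For example, to raise heal parallelism on a volume (values illustrative; keep an eye on brick CPU and disk load after changing them):

# gluster volume set testvol cluster.shd-max-threads 4
# gluster volume set testvol cluster.shd-wait-qlength 2048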

I’m not covering it here but it is possible to launch multiple shd instances (and kill them later on) on your node for increasing heal throughput. It is documented at https://access.redhat.com/solutions/3794011.

+

iv) Self-heal is too aggressive and slows down the system.

+

If shd-max-threads is at the lowest value (i.e. 1) and the CPU usage of the bricks is still too high, check whether the volume’s profile info shows a lot of RCHECKSUM fops. Data self-heal does checksum calculation (i.e. the posix_rchecksum() FOP), which can be CPU intensive. You can set the cluster.data-self-heal-algorithm option to full. This does a full file copy instead of computing rolling checksums and syncing only the mismatching blocks. The tradeoff is that network consumption will be increased.
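A sketch of checking for RCHECKSUM-heavy heals and switching the algorithm (volume name illustrative):

# gluster volume profile testvol start
# gluster volume profile testvol info | grep -i rchecksum
# gluster volume set testvol cluster.data-self-heal-algorithm full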

+

You can also disable all client-side heals if they are turned on, so that the client bandwidth is consumed entirely by the application FOPs and not by client-side background heals, i.e. turn off cluster.metadata-self-heal, cluster.data-self-heal and cluster.entry-self-heal.
Note: In recent versions of gluster, client-side heals are disabled by default.
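A sketch of turning the client-side heals off (volume name illustrative):

# gluster volume set testvol cluster.metadata-self-heal off
# gluster volume set testvol cluster.data-self-heal off
# gluster volume set testvol cluster.entry-self-heal off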

+ +

i) All fops are failing with ENOTCONN

+

Check mount log/ statedump for loss of quorum, just like for glustershd. If this is a fuse client (as opposed to an nfs/ gfapi client), you can also check the .meta folder to check the connection status to the bricks.

+
# cat /mnt/fuse_mnt/.meta/graphs/active/testvol-client-*/private |grep connected
+
+connected = 0
+connected = 1
+connected = 1
+
+

If connected=0, the connection to that brick is lost. Find out why. If the client is not connected to a quorum number of bricks, then AFR fails lookups (and therefore any subsequent FOP) with 'Transport endpoint is not connected'.

+

ii) FOPs on some files are failing with ENOTCONN

+

Check mount log for the file being unreadable:

+
[2019-05-10 11:04:01.607046] W [MSGID: 108027] [afr-common.c:2268:afr_attempt_readsubvol_set] 13-testvol-replicate-0: no read subvols for /FILE.txt
+[2019-05-10 11:04:01.607775] W [fuse-bridge.c:939:fuse_entry_cbk] 0-glusterfs-fuse: 234: LOOKUP() /FILE.txt => -1 (Transport endpoint is not connected)
+
+

This means there was only 1 good copy and the client has lost connection to that brick. You need to ensure that the client is connected to all bricks.

+

iii) Mount is hung

+

It can be difficult to pin-point the issue immediately and might require assistance from the developers but the first steps to debugging could be to

+
  • strace the fuse mount; see where it is hung.
  • Take a statedump of the mount to see which xlator has frames that are not wound (i.e. complete=0) and for which FOP. Then check the source code to see if there are any unhandled cases where the xlator doesn’t wind the FOP to its child.
  • Take statedumps of the bricks to see if there are any stale locks. An indication of stale locks is the same lock being present in multiple statedumps, or the ‘granted’ date being very old.
+

Excerpt from a brick statedump:

+
[xlator.features.locks.testvol-locks.inode]
+path=/FILE
+mandatory=0
+inodelk-count=1
+lock-dump.domain.domain=testvol-replicate-0:self-heal
+lock-dump.domain.domain=testvol-replicate-0
+inodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=0, len=0,
+pid = 18446744073709551610, owner=700a0060037f0000, client=0x7fc57c09c1c0,
+connection-id=vm1-17902-2018/10/14-07:18:17:132969-testvol-client-0-0-0, granted at 2018-10-14 07:18:40
+
+

While stale lock issues are candidates for bug reports, the locks xlator on the brick releases locks from a particular client upon a network disconnect. That can be used as a workaround to release the stale locks- i.e. restart the brick or restart the client or induce a network disconnect between them.


Troubleshooting File Locks

+

Use statedumps to find and list the locks held on files. The statedump output also provides information on each lock with its range, basename, PID of the application holding the lock, and so on. You can analyze the output to identify locks whose owner/application is no longer running or no longer interested in the lock. After ensuring that no application is using the file, you can clear the lock using the following clear-locks commands.

+
    +
  1. +

    Perform statedump on the volume to view the files that are locked + using the following command:

    +
    gluster volume statedump <VOLNAME> inode
    +
    +

    For example, to display statedump of test-volume:

    +
    gluster volume statedump test-volume
    +Volume statedump successful
    +
    +

    The statedump files are created on the brick servers in the /tmp directory or in the directory set using the server.statedump-path volume option. The naming convention of the dump file is <brick-path>.<brick-pid>.dump.

    +

    The following are the sample contents of the statedump file. It +indicates that GlusterFS has entered into a state where there is an +entry lock (entrylk) and an inode lock (inodelk). Ensure that those +are stale locks and no resources own them.

    +
    [xlator.features.locks.vol-locks.inode]
    +path=/
    +mandatory=0
    +entrylk-count=1
    +lock-dump.domain.domain=vol-replicate-0
    +xlator.feature.locks.lock-dump.domain.entrylk.entrylk[0](ACTIVE)=type=ENTRYLK_WRLCK on basename=file1, pid = 714782904, owner=ffffff2a3c7f0000, transport=0x20e0670, , granted at Mon Feb 27 16:01:01 2012
    +
    +conn.2.bound_xl./gfs/brick1.hashsize=14057
    +conn.2.bound_xl./gfs/brick1.name=/gfs/brick1/inode
    +conn.2.bound_xl./gfs/brick1.lru_limit=16384
    +conn.2.bound_xl./gfs/brick1.active_size=2
    +conn.2.bound_xl./gfs/brick1.lru_size=0
    +conn.2.bound_xl./gfs/brick1.purge_size=0
    +
    +[conn.2.bound_xl./gfs/brick1.active.1]
    +gfid=538a3d4a-01b0-4d03-9dc9-843cd8704d07
    +nlookup=1
    +ref=2
    +ia_type=1
    +[xlator.features.locks.vol-locks.inode]
    +path=/file1
    +mandatory=0
    +inodelk-count=1
    +lock-dump.domain.domain=vol-replicate-0
    +inodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=0, len=0, pid = 714787072, owner=00ffff2a3c7f0000, transport=0x20e0670, , granted at Mon Feb 27 16:01:01 2012
    +
    +
  2. +
  3. +

    Clear the lock using the following command:

    +
    gluster volume clear-locks
    +
    +

    For example, to clear the entry lock on file1 of test-volume:

    +
    gluster volume clear-locks test-volume / kind granted entry file1
    +Volume clear-locks successful
    +vol-locks: entry blocked locks=0 granted locks=1
    +
    +
  4. +
  5. +

    Clear the inode lock using the following command:

    +
    gluster volume clear-locks
    +
    +

    For example, to clear the inode lock on file1 of test-volume:

    +
    gluster volume clear-locks test-volume /file1 kind granted inode 0,0-0
    +Volume clear-locks successful
    +vol-locks: inode blocked locks=0 granted locks=1
    +
    +

    Perform statedump on test-volume again to verify that the +above inode and entry locks are cleared.

    +
  6. +
diff --git a/Troubleshooting/troubleshooting-georep/index.html b/Troubleshooting/troubleshooting-georep/index.html

Troubleshooting Geo-replication

+ +

Troubleshooting Geo-replication

+

This section describes the most common troubleshooting scenarios related +to GlusterFS Geo-replication.

+

Locating Log Files

+

For every Geo-replication session, the following three log files are associated with it (four, if the secondary is a gluster volume):

+
    +
  • Primary-log-file - log file for the process which monitors the Primary + volume
  • +
  • Secondary-log-file - log file for process which initiates the changes in + secondary
  • +
  • Primary-gluster-log-file - log file for the maintenance mount point + that Geo-replication module uses to monitor the Primary volume
  • +
  • Secondary-gluster-log-file - the secondary's counterpart of the above
  • +
+

Primary Log File

+

To get the Primary-log-file for geo-replication, use the following +command:

+
gluster volume geo-replication <session> config log-file
+
+

For example:

+
gluster volume geo-replication Volume1 example.com:/data/remote_dir config log-file
+
+

Secondary Log File

+

To get the log file for geo-replication on secondary (glusterd must be +running on secondary machine), use the following commands:

+
    +
  1. +

    On primary, run the following command:

    +
    gluster volume geo-replication Volume1 example.com:/data/remote_dir config session-owner 5f6e5200-756f-11e0-a1f0-0800200c9a66
    +
    +

    Displays the session owner details.

    +
  2. +
  3. +

    On secondary, run the following command:

    +
    gluster volume geo-replication /data/remote_dir config log-file /var/log/gluster/${session-owner}:remote-mirror.log
    +
    +
  4. +
  5. +

    Substitute the session owner details (output of Step 1) into the output of Step 2 to get the location of the log file.

    +
    /var/log/gluster/5f6e5200-756f-11e0-a1f0-0800200c9a66:remote-mirror.log
    +
    +
  6. +
+

Rotating Geo-replication Logs

+

Administrators can rotate the log file of a particular primary-secondary session, as needed. When you run geo-replication's log-rotate command, the log file is backed up with the current timestamp suffixed to the file name, and a signal is sent to gsyncd to start logging to a new log file.

+

To rotate a geo-replication log file

+
    +
  • Rotate log file for a particular primary-secondary session using the + following command:
    gluster volume geo-replication  log-rotate
    +
    +
  • +
+

For example, to rotate the log file of primary Volume1 and secondary + example.com:/data/remote_dir :

+
    gluster volume geo-replication Volume1 example.com:/data/remote_dir log rotate
+    log rotate successful
+
+
    +
  • Rotate log file for all sessions for a primary volume using the + following command:
    gluster volume geo-replication  log-rotate
    +
    +
  • +
+

For example, to rotate the log file of primary Volume1:

+
    gluster volume geo-replication Volume1 log rotate
+    log rotate successful
+
+
    +
  • Rotate log file for all sessions using the following command:
    gluster volume geo-replication log-rotate
    +
    +
  • +
+

For example, to rotate the log file for all sessions:

+
    gluster volume geo-replication log rotate
+    log rotate successful
+
+

Synchronization is not complete

+

Description: GlusterFS geo-replication did not synchronize the data completely, but the geo-replication status is displayed as OK.

+

Solution: You can enforce a full sync of the data by erasing the +index and restarting GlusterFS geo-replication. After restarting, +GlusterFS geo-replication begins synchronizing all the data. All files +are compared using checksum, which can be a lengthy and high resource +utilization operation on large data sets.

+

Issues in Data Synchronization

+

Description: Geo-replication displays its status as OK, but the files do not get synced; only directories and symlinks get synced, with the following error message in the log:

+
[2011-05-02 13:42:13.467644] E [primary:288:regjob] GMaster: failed to sync ./some_file
+
+

Solution: Geo-replication invokes rsync v3.0.0 or higher on the host +and the remote machine. You must verify if you have installed the +required version.
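For example, a quick way to confirm the installed rsync version on the host and on the remote machine:

rsync --version | head -n 1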

+

Geo-replication status displays Faulty very often

+

Description: Geo-replication displays status as faulty very often +with a backtrace similar to the following:

+
[2011-04-28 14:06:18.378859] E [syncdutils:131:log_raise_exception]
<top>: FAIL: Traceback (most recent call last): File
"/usr/local/libexec/glusterfs/python/syncdaemon/syncdutils.py", line
152, in twraptf(*aa) File
"/usr/local/libexec/glusterfs/python/syncdaemon/repce.py", line 118, in
listen rid, exc, res = recv(self.inf) File
"/usr/local/libexec/glusterfs/python/syncdaemon/repce.py", line 42, in
recv return pickle.load(inf) EOFError
+
+

Solution: This error indicates that the RPC communication between the primary gsyncd module and the secondary gsyncd module is broken, which can happen for various reasons. Check whether all the following pre-requisites are satisfied:

+
    +
  • Password-less SSH is set up properly between the host and the remote + machine.
  • +
  • If FUSE is installed in the machine, because geo-replication module + mounts the GlusterFS volume using FUSE to sync data.
  • +
  • If the Secondary is a volume, check if that volume is started.
  • +
  • If the Secondary is a plain directory, verify if the directory has been + created already with the required permissions.
  • +
  • If GlusterFS 3.2 or higher is not installed in the default location + (in Primary) and has been prefixed to be installed in a custom + location, configure the gluster-command for it to point to the + exact location.
  • +
  • If GlusterFS 3.2 or higher is not installed in the default location + (in secondary) and has been prefixed to be installed in a custom + location, configure the remote-gsyncd-command for it to point to + the exact place where gsyncd is located.
  • +
+
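A few of these pre-requisites can be checked quickly; the account, host and volume names below are placeholders:

ssh -o PasswordAuthentication=no geoaccount@secondary.example.com 'echo password-less ssh works'
lsmod | grep fuse                        # FUSE module is loaded/available
gluster volume info secondaryvol         # on the secondary: Status should show Started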

Intermediate Primary goes to Faulty State

+

Description: In a cascading set-up, the intermediate primary goes to +faulty state with the following log:

+
raise RuntimeError ("aborting on uuid change from %s to %s" % \\
+RuntimeError: aborting on uuid change from af07e07c-427f-4586-ab9f-
+4bf7d299be81 to de6b5040-8f4e-4575-8831-c4f55bd41154
+
+

Solution: In a cascading set-up, the intermediate primary is loyal to the original primary. The above log means that the geo-replication module has detected a change in the original primary. If this is the desired behavior, delete the config option volume-id in the session initiated from the intermediate primary.

diff --git a/Troubleshooting/troubleshooting-glusterd/index.html b/Troubleshooting/troubleshooting-glusterd/index.html

Troubleshooting CLI and glusterd

+ +

Troubleshooting the gluster CLI and glusterd

+

The glusterd daemon runs on every trusted server node and is responsible for the management of the trusted pool and volumes.

+

The gluster CLI sends commands to the glusterd daemon on the local node, which executes the operation and returns the result to the user.

+

Debugging glusterd

+

Logs

+

Start by looking at the log files for clues as to what went wrong when you hit a problem. +The default directory for Gluster logs is /var/log/glusterfs. The logs for the CLI and glusterd are:

+
    +
  • glusterd : /var/log/glusterfs/glusterd.log
  • +
  • gluster CLI : /var/log/glusterfs/cli.log
  • +
+

Statedumps

+

Statedumps are useful in debugging memory leaks and hangs. +See Statedump for more details.

+

Common Issues and How to Resolve Them

+

"Another transaction is in progress for volname" or "Locking failed on xxx.xxx.xxx.xxx"

+

As Gluster is distributed by nature, glusterd takes locks when performing operations to ensure that configuration changes made to a volume are atomic across the cluster. +These errors are returned when:

+
    +
  • More than one transaction contends on the same lock.
  • +
+
+

Solution : These are likely to be transient errors and the operation will succeed if retried once the other transaction is complete.

+
+
    +
  • A stale lock exists on one of the nodes.
  • +
+
+

Solution : Repeating the operation will not help until the stale lock is cleaned up. Restart the glusterd process holding the lock:

+
+
    +
  • Check the glusterd.log file to find out which node holds the stale lock. Look for the message: + lock being held by <uuid>
  • +
  • Run gluster peer status to identify the node with the uuid in the log message.
  • +
  • Restart glusterd on that node.
  • +
+
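As a minimal sketch of that sequence (the log path is the default mentioned earlier):

grep "lock being held by" /var/log/glusterfs/glusterd.log    # note the UUID in the message
gluster peer status                                          # map that UUID to a node
systemctl restart glusterd                                   # run this on the identified node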

"Transport endpoint is not connected" errors but all bricks are up

+

This is usually seen when a brick process does not shut down cleanly, leaving stale data behind in the glusterd process. Gluster client processes query glusterd for the ports the brick processes are listening on and then attempt to connect to those ports. If the port information in glusterd is incorrect, the client will fail to connect to the brick even though the brick is up. Operations that need to access that brick may then fail with "Transport endpoint is not connected".

+

Solution : Restart the glusterd service.

+

"Peer Rejected"

+

gluster peer status returns "Peer Rejected" for a node.

+
Hostname: <hostname>
+Uuid: <xxxx-xxx-xxxx>
+State: Peer Rejected (Connected)
+
+

This indicates that the volume configuration on the node is not in sync with the rest of the trusted storage pool. +You should see the following message in the glusterd log for the node on which the peer status command was run:

+
Version of Cksums <vol-name> differ. local cksum = xxxxxx, remote cksum = xxxxyx on peer <hostname>
+
+

Solution: Update the cluster.op-version

+
    +
  • Run gluster volume get all cluster.max-op-version to get the latest supported op-version.
  • +
  • Update the cluster.op-version to the latest supported op-version by executing gluster volume set all cluster.op-version <op-version>.
  • +
+
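For example (the value 100000 is hypothetical; use whatever your cluster reports as the maximum):

gluster volume get all cluster.max-op-version
gluster volume set all cluster.op-version 100000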

"Accepted Peer Request"

+

If the glusterd handshake fails while expanding a cluster, the view of the cluster will be inconsistent. The state of the peer in gluster peer status will be “accepted peer request” and subsequent CLI commands will fail with an error, e.g. the volume create command will fail with "volume create: testvol: failed: Host <hostname> is not in 'Peer in Cluster' state".

+

In this case the value of the state field in /var/lib/glusterd/peers/<UUID> will be other than 3.

+

Solution:

+
    +
  • Stop glusterd
  • +
  • Open /var/lib/glusterd/peers/<UUID>
  • +
  • Change state to 3
  • +
  • Start glusterd
  • +
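A minimal sketch of those steps on the affected node (the UUID file name is a placeholder; keep a backup of the file before editing it):

systemctl stop glusterd
cp /var/lib/glusterd/peers/<UUID> /root/peer-file.bak
sed -i 's/^state=.*/state=3/' /var/lib/glusterd/peers/<UUID>
systemctl start glusterd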
diff --git a/Troubleshooting/troubleshooting-gnfs/index.html b/Troubleshooting/troubleshooting-gnfs/index.html

Troubleshooting gNFS

+ +

Troubleshooting Gluster NFS

+

This section describes the most common troubleshooting issues related to NFS.

+

mount command on NFS client fails with “RPC Error: Program not registered”

+

Start portmap or rpcbind service on the NFS server.

+

This error is encountered when the server has not started correctly. +On most Linux distributions this is fixed by starting portmap:

+
/etc/init.d/portmap start
+
+

On some distributions where portmap has been replaced by rpcbind, the +following command is required:

+
/etc/init.d/rpcbind start
+
+

After starting portmap or rpcbind, gluster NFS server needs to be +restarted.

+

NFS server start-up fails with “Port is already in use” error in the log file.

+

Another Gluster NFS server is running on the same machine.

+

This error can arise in case there is already a Gluster NFS server +running on the same machine. This situation can be confirmed from the +log file, if the following error lines exist:

+
[2010-05-26 23:40:49] E [rpc-socket.c:126:rpcsvc_socket_listen] rpc-socket: binding socket failed:Address already in use
+[2010-05-26 23:40:49] E [rpc-socket.c:129:rpcsvc_socket_listen] rpc-socket: Port is already in use
+[2010-05-26 23:40:49] E [rpcsvc.c:2636:rpcsvc_stage_program_register] rpc-service: could not create listening connection
+[2010-05-26 23:40:49] E [rpcsvc.c:2675:rpcsvc_program_register] rpc-service: stage registration of program failed
+[2010-05-26 23:40:49] E [rpcsvc.c:2695:rpcsvc_program_register] rpc-service: Program registration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465
+[2010-05-26 23:40:49] E [nfs.c:125:nfs_init_versions] nfs: Program init failed
+[2010-05-26 23:40:49] C [nfs.c:531:notify] nfs: Failed to initialize protocols
+
+

To resolve this error, one of the Gluster NFS servers will have to be shut down. At this time, Gluster NFS server does not support running multiple NFS servers on the same machine.

+ +

If the mount command fails with the following error message:

+
mount.nfs: rpc.statd is not running but is required for remote locking.
+mount.nfs: Either use '-o nolock' to keep locks local, or start statd.
+
+

For NFS clients to mount the NFS server, rpc.statd service must be +running on the clients. Start rpc.statd service by running the following command:

+
rpc.statd
+
+

mount command takes too long to finish.

+

Start rpcbind service on the NFS client

+

The problem is that the rpcbind or portmap service is not running on the +NFS client. The resolution for this is to start either of these services +by running the following command:

+
/etc/init.d/portmap start
+
+

On some distributions where portmap has been replaced by rpcbind, the +following command is required:

+
/etc/init.d/rpcbind start
+
+

NFS server glusterfsd starts but initialization fails with “nfsrpc- service: portmap registration of program failed” error message in the log.

+

NFS start-up can succeed but the initialization of the NFS service can +still fail preventing clients from accessing the mount points. Such a +situation can be confirmed from the following error messages in the log +file:

+
[2010-05-26 23:33:47] E [rpcsvc.c:2598:rpcsvc_program_register_portmap] rpc-service: Could notregister with portmap
+[2010-05-26 23:33:47] E [rpcsvc.c:2682:rpcsvc_program_register] rpc-service: portmap registration of program failed
+[2010-05-26 23:33:47] E [rpcsvc.c:2695:rpcsvc_program_register] rpc-service: Program registration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465
+[2010-05-26 23:33:47] E [nfs.c:125:nfs_init_versions] nfs: Program init failed
+[2010-05-26 23:33:47] C [nfs.c:531:notify] nfs: Failed to initialize protocols
+[2010-05-26 23:33:49] E [rpcsvc.c:2614:rpcsvc_program_unregister_portmap] rpc-service: Could not unregister with portmap
+[2010-05-26 23:33:49] E [rpcsvc.c:2731:rpcsvc_program_unregister] rpc-service: portmap unregistration of program failed
+[2010-05-26 23:33:49] E [rpcsvc.c:2744:rpcsvc_program_unregister] rpc-service: Program unregistration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465
+
+
    +
  1. +

    Start portmap or rpcbind service on the NFS server

    +

    On most Linux distributions, portmap can be started using the +following command:

    +
    /etc/init.d/portmap start
    +
    +

    On some distributions where portmap has been replaced by rpcbind, +run the following command:

    +
    /etc/init.d/rpcbind start
    +
    +

    After starting portmap or rpcbind, gluster NFS server needs to be +restarted.

    +
  2. +
  3. +

    Stop another NFS server running on the same machine

    +

    Such an error is also seen when there is another NFS server running +on the same machine but it is not the Gluster NFS server. On Linux +systems, this could be the kernel NFS server. Resolution involves +stopping the other NFS server or not running the Gluster NFS server +on the machine. Before stopping the kernel NFS server, ensure that +no critical service depends on access to that NFS server's exports.

    +

    On Linux, kernel NFS servers can be stopped by using either of the +following commands depending on the distribution in use:

    +
    /etc/init.d/nfs-kernel-server stop
    +/etc/init.d/nfs stop
    +
    +
  4. +
  5. +

    Restart Gluster NFS server

    +
  6. +
+

mount command fails with NFS server failed error.

+

mount command fails with following error

+
mount: mount to NFS server '10.1.10.11' failed: timed out (retrying).
+
+

Perform one of the following to resolve this issue:

+
    +
  1. +

    Disable name lookup requests from NFS server to a DNS server

    +

    The NFS server attempts to authenticate NFS clients by performing a reverse DNS lookup to match hostnames in the volume file with the client IP addresses. There can be a situation where the NFS server either is not able to connect to the DNS server or the DNS server is taking too long to respond to DNS requests. These delays can result in delayed replies from the NFS server to the NFS client, resulting in the timeout error seen above.

    +

    NFS server provides a work-around that disables DNS requests, +instead relying only on the client IP addresses for authentication. +The following option can be added for successful mounting in such +situations:

    +

    option rpc-auth.addr.namelookup off

    +
    +

    Note: Remember that disabling name lookup forces the NFS server to authenticate clients using only their IP addresses, and if the authentication rules in the volume file use hostnames, those authentication rules will fail and disallow mounting for those clients.

    +
    +

    OR

    +
  2. +
  3. +

    NFS version used by the NFS client is other than version 3

    +

    Gluster NFS server supports version 3 of the NFS protocol. In recent Linux kernels, the default NFS version has been changed from 3 to 4. It is possible that the client machine is unable to connect to the Gluster NFS server because it is using version 4 messages, which are not understood by the Gluster NFS server. The timeout can be resolved by forcing the NFS client to use version 3. The vers option to the mount command is used for this purpose (a complete example follows this list):

    +
    mount -o vers=3
    +
    +
  4. +
+
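For instance, a complete mount command could look like the following; the server name and mount point are placeholders:

mount -t nfs -o vers=3 server1:/test-volume /mnt/nfs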

showmount fails with clnt_create: RPC: Unable to receive

+

Check your firewall settings to ensure that port 111 is open for portmap requests/replies, along with the Gluster NFS server ports. Gluster NFS server operates over the following port numbers: 38465, 38466, and 38467.
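For example, on systems using firewalld, these ports could be opened as follows (default zone assumed):

firewall-cmd --permanent --add-port=111/tcp --add-port=111/udp
firewall-cmd --permanent --add-port=38465-38467/tcp
firewall-cmd --reload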

+

Application fails with "Invalid argument" or "Value too large for defined data type" error.

+

These two errors generally happen for 32-bit NFS clients, or applications that do not support 64-bit inode numbers or large files. Use the following option from the CLI to make Gluster NFS return 32-bit inode numbers instead: nfs.enable-ino32 <on|off>
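For example, assuming the volume is named test-volume:

gluster volume set test-volume nfs.enable-ino32 on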

+

Applications that will benefit are those that were either:

+
    +
  • built 32-bit and run on 32-bit machines such that they do not + support large files by default
  • +
  • built 32-bit on 64-bit systems
  • +
+

This option is disabled by default so NFS returns 64-bit inode numbers +by default.

+

Applications which can be rebuilt from source are recommended to rebuild +using the following flag with gcc:

+
-D_FILE_OFFSET_BITS=64
+
diff --git a/Troubleshooting/troubleshooting-memory/index.html b/Troubleshooting/troubleshooting-memory/index.html

Troubleshooting High Memory Utilization

+

If the memory utilization of a Gluster process increases significantly with time, it could be a leak caused by resources not being freed. +If you suspect that you may have hit such an issue, try using statedumps to debug the issue.
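For example, a statedump of the brick processes of a volume can be triggered from the CLI, and a glusterfs client process can be asked for one with SIGUSR1 (the volume name is a placeholder; dumps normally land under /var/run/gluster, or the path set via server.statedump-path):

gluster volume statedump testvol
kill -USR1 $(pidof glusterfs)    # statedump of the glusterfs (client/auxiliary) processes on this node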

+

If you are unable to figure out where the leak is, please file an issue and provide the following details:

+
    +
  • Gluster version
  • +
  • The affected process
  • +
  • The output of gluster volume info
  • +
  • Steps to reproduce the issue if available
  • +
  • Statedumps for the process collected at intervals as the memory utilization increases
  • +
  • The Gluster log files for the process (if possible)
  • +
diff --git a/Upgrade-Guide/generic-upgrade-procedure/index.html b/Upgrade-Guide/generic-upgrade-procedure/index.html

Generic Upgrade procedure

+

Pre-upgrade notes

+
    +
  • Online upgrade is only possible with replicated and distributed replicate volumes
  • +
  • Online upgrade is not supported for dispersed or distributed dispersed volumes
  • +
  • Ensure no configuration changes are done during the upgrade
  • +
  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
  • +
  • Upgrading the servers ahead of the clients is recommended
  • +
  • It is recommended to eventually have clients and servers running the same major version
  • +
+

Online upgrade procedure for servers

+

This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

+
+

ALERT: If there are disperse or, pure distributed volumes in the storage pool being upgraded, this procedure is NOT recommended, use the Offline upgrade procedure instead.

+
+

Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to new-version :

+
    +
  1. +

    Stop all gluster services, either using the command below, or through other means.

    +
    systemctl stop glusterd
    +systemctl stop glustereventsd
    +killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

    +
  4. +
  5. +

    Install Gluster new-version; the example below shows how to create a repository on Fedora and use it to upgrade:

    +

    3.1 Create a private repository (assuming the /new-gluster-rpms/ folder has the new RPMs):

    +
    createrepo /new-gluster-rpms/
    +
    +

    3.2 Create the .repo file in /etc/yum.repos.d/:

    +
    # cat /etc/yum.repos.d/newglusterrepo.repo
    + [newglusterrepo]
    + name=NewGlusterRepo
    + baseurl="file:///new-gluster-rpms/"
    + gpgcheck=0
    + enabled=1
    +
    +

    3.3 Upgrade glusterfs, for example to upgrade glusterfs-server to version x.y:

    +
    yum update glusterfs-server-x.y.fc30.x86_64.rpm
    +
    +
  6. +
  7. +

    Ensure that version reflects new-version in the output of,

    +
    gluster --version
    +
    +
  8. +
  9. +

    Start glusterd on the upgraded server

    +
    systemctl start glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    If the glustereventsd service was previously enabled, it is required to start it using the commands below, or, through other means,

    +
    systemctl start glustereventsd
    +
    +
  14. +
  15. +

    Invoke self-heal on all the gluster volumes by running,

    +
    for i in `gluster volume list`; do gluster volume heal $i; done
    +
    +
  16. +
  17. +

    Verify that there are no heal backlog by running the command for all the volumes,

    +
    gluster volume heal <volname> info
    +
    +
  18. +
+
+

NOTE: Before proceeding to upgrade the next server in the pool it is recommended to check the heal backlog. If there is a heal backlog, it is recommended to wait until the backlog is empty, or, the backlog does not contain any entries requiring a sync to the just upgraded server.
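One way to keep an eye on the backlog across all volumes before moving on (the info summary form is available on recent releases; drop summary on older ones):

for v in $(gluster volume list); do gluster volume heal "$v" info summary; done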

+
+
    +
  1. Restart any gfapi based application stopped previously in step (2)
  2. +
+

Offline upgrade procedure

+

This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

+

Steps to perform an offline upgrade:

+
    +
  1. +

    On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

    +
    systemctl stop glusterd
    +systemctl stop glustereventsd
    +killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

    +
  4. +
  5. +

    Install Gluster new-version, on all servers

    +
  6. +
  7. +

    Ensure that version reflects new-version in the output of the following command on all servers,

    +
    gluster --version
    +
    +
  8. +
  9. +

    Start glusterd on all the upgraded servers

    +
    systemctl start glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    If the glustereventsd service was previously enabled, it is required to start it using the commands below, or, through other means,

    +
    systemctl start glustereventsd
    +
    +
  14. +
  15. +

    Restart any gfapi based application stopped previously in step (2)

    +
  16. +
+

Post upgrade steps

+

Perform the following steps post upgrading the entire trusted storage pool,

+
    +
  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
  • +
  • Proceed to upgrade the clients to new-version version as well
  • +
  • Post upgrading the clients, for replicate volumes, it is recommended to turn off the usage of MD5 checksums during healing by running gluster volume set <volname> fips-mode-rchecksum on. This enables running Gluster on FIPS-compliant systems.
  • +
+

If upgrading from a version lesser than Gluster 7.0

+
+

NOTE: If you have ever enabled quota on your volumes then after the upgrade +is done, you will have to restart all the nodes in the cluster one by one so as to +fix the checksum values in the quota.cksum file under the /var/lib/glusterd/vols/<volname>/ directory. +The peers may go into Peer rejected state while doing so but once all the nodes are rebooted +everything will be back to normal.

+
+

Upgrade procedure for clients

+

Following are the steps to upgrade clients to the new-version version,

+
    +
  1. Unmount all glusterfs mount points on the client
  2. +
  3. Stop all applications that access the volumes via gfapi (qemu, etc.)
  4. +
  5. Install Gluster new-version
  6. +
  7. Mount all gluster shares
  8. +
  9. Start any applications that were stopped previously in step (2)
  10. +
diff --git a/Upgrade-Guide/index.html b/Upgrade-Guide/index.html

Upgrade-Guide Index

+ +

Upgrading GlusterFS

+ +

If you are using GlusterFS version 6.x or above, you can upgrade it to the following:

+ +

If you are using GlusterFS version 5.x or above, you can upgrade it to the following:

+ +

If you are using GlusterFS version 4.x or above, you can upgrade it to the following:

+ +

If you are using GlusterFS version 3.4.x or above, you can upgrade it to the following:

diff --git a/Upgrade-Guide/op-version/index.html b/Upgrade-Guide/op-version/index.html

Op-version

+ +

op-version

+

op-version is the operating version of the Gluster which is running.

+

op-version was introduced to ensure that nodes running different versions of Gluster do not run into problems, and so that backward-compatibility issues can be handled.

+

After a Gluster upgrade, it is advisable to update the op-version.

+

Updating op-version

+

Current op-version can be queried as below:

+

For 3.10 onwards:

+
gluster volume get all cluster.op-version
+
+

For release < 3.10:

+
# gluster volume get <VOLNAME> cluster.op-version
+
+

To get the maximum possible op-version a cluster can support, the following query can be used (this is available 3.10 release onwards):

+
gluster volume get all cluster.max-op-version
+
+

For example, if some nodes in a cluster have been upgraded to X and some to X+, then the maximum op-version supported by the cluster is X, and the cluster.op-version can be bumped up to X to support new features.

+

op-version can be updated as below. +For example, after upgrading to glusterfs-4.0.0, set op-version as:

+
gluster volume set all cluster.op-version 40000
+
+

Note: This is not mandatory, but it is advisable to update the op-version if you want to make use of the latest features in the upgraded Gluster.

+

Client op-version

+

When trying to set a volume option, it might happen that one or more of the connected clients cannot support the feature being set and might need to be upgraded to the op-version the cluster is currently running on.

+

To check op-version information for the connected clients and find the offending client, the following query can be used for 3.10 release onwards:

+
# gluster volume status <all|VOLNAME> clients
+
+

The respective clients can then be upgraded to the required version.

+

This information could also be used to make an informed decision while bumping up the op-version of a cluster, so that connected clients can support all the new features provided by the upgraded cluster as well.

diff --git a/Upgrade-Guide/upgrade-to-10/index.html b/Upgrade-Guide/upgrade-to-10/index.html

Upgrade procedure to Gluster 10, from Gluster 9.x, 8.x and 7.x

+

We recommend reading the release notes for 10.0 to be +aware of the features and fixes provided with the release.

+
+

NOTE: Before following the generic upgrade procedure checkout the "Major Issues" section given below.

+
+

Refer to the generic upgrade procedure guide and follow the documented instructions.

+

Major issues

+

The following options have been removed from the code base and must be unset

+

before an upgrade from releases older than release 4.1.0,

+
- features.lock-heal
+- features.grace-timeout
+
+

To check if these options are set use,

+
gluster volume info
+
+

and ensure that the above options are not part of the Options Reconfigured: +section in the output of all volumes in the cluster.
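For example, a quick way to check every volume at once:

gluster volume info | grep -E 'features.lock-heal|features.grace-timeout'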

+

If these are set, then unset them using the following commands,

+
# gluster volume reset <volname> <option>
+
+

Make sure you are not using any of the following deprecated features:

+
- Block device (bd) xlator
+- Decompounder feature
+- Crypt xlator
+- Symlink-cache xlator
+- Stripe feature
+- Tiering support (tier xlator and changetimerecorder)
+- Glupy
+
+

NOTE: Failure to do the above may result in failure during online upgrades, +and the reset of these options to their defaults needs to be done prior to +upgrading the cluster.

+

Deprecated translators and upgrade procedure for volumes using these features

+

If you are upgrading from a release prior to release-6 be aware of deprecated xlators and functionality.

diff --git a/Upgrade-Guide/upgrade-to-11/index.html b/Upgrade-Guide/upgrade-to-11/index.html

Upgrade procedure to Gluster 11, from Gluster 10.x, 9.x and 8.x

+

We recommend reading the release notes for 11.0 to be +aware of the features and fixes provided with the release.

+
+

NOTE: Before following the generic upgrade procedure checkout the "Major Issues" section given below.

+
+

Refer to the generic upgrade procedure guide and follow the documented instructions.

+

Major issues

+

The following options have been removed from the code base and must be unset

+

before an upgrade from releases older than release 4.1.0,

+
- features.lock-heal
+- features.grace-timeout
+
+

To check if these options are set use,

+
gluster volume info
+
+

and ensure that the above options are not part of the Options Reconfigured: +section in the output of all volumes in the cluster.

+

If these are set, then unset them using the following commands,

+
# gluster volume reset <volname> <option>
+
+

Make sure you are not using any of the following deprecated features:

+
- Block device (bd) xlator
+- Decompounder feature
+- Crypt xlator
+- Symlink-cache xlator
+- Stripe feature
+- Tiering support (tier xlator and changetimerecorder)
+- Glupy
+
+

NOTE: Failure to do the above may result in failure during online upgrades, +and the reset of these options to their defaults needs to be done prior to +upgrading the cluster.

+

Online Upgrade: Users may observe "Peer Rejected" issues while upgrading if NFS-Ganesha is not enabled, as the nfs options were made optional in this release, causing a checksum mismatch. Stopping and starting the server after the upgrade should fix the issue.

+

Deprecated translators and upgrade procedure for volumes using these features

+

If you are upgrading from a release prior to release-6 be aware of deprecated xlators and functionality.

diff --git a/Upgrade-Guide/upgrade-to-3.10/index.html b/Upgrade-Guide/upgrade-to-3.10/index.html

Upgrade to 3.10

+ +

Upgrade procedure to Gluster 3.10.0, from Gluster 3.9.x, 3.8.x and 3.7.x

+

Pre-upgrade notes

+
    +
  • Online upgrade is only possible with replicated and distributed replicate volumes
  • +
  • Online upgrade is not supported for dispersed or distributed dispersed volumes
  • +
  • Ensure no configuration changes are done during the upgrade
  • +
  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
  • +
  • Upgrading the servers ahead of the clients is recommended
  • +
  • It is recommended to have the same client and server, major versions running eventually
  • +
+

Online upgrade procedure for servers

+

This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set, are not part of the same server in the trusted storage pool.

+
+

ALERT: If any of your volumes, in the trusted storage pool that is being upgraded, uses disperse or is a pure distributed volume, this procedure is NOT recommended, use the Offline upgrade procedure instead.

+
+

Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 3.10 version:

+
    +
  1. +

    Stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

    +
  4. +
  5. +

    Install Gluster 3.10

    +
  6. +
  7. +

    Ensure that version reflects 3.10.0 in the output of,

    +
    gluster --version
    +
    +
  8. +
  9. +

    Start glusterd on the upgraded server

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    Self-heal all gluster volumes by running

    +
    for i in `gluster volume list`; do gluster volume heal $i; done
    +
    +
  14. +
  15. +

    Ensure that there is no heal backlog by running the below command for all volumes

    +
    gluster volume heal <volname> info
    +
    +
    +

    NOTE: If there is a heal backlog, wait till the backlog is empty, or the backlog does not have any entries needing a sync to the just upgraded server, before proceeding to upgrade the next server in the pool

    +
    +
  16. +
  17. +

    Restart any gfapi based application stopped previously in step (2)

    +
  18. +
+

Offline upgrade procedure

+

This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

+

Steps to perform an offline upgrade:

+
    +
  1. +

    On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

    +
  4. +
  5. +

    Install Gluster 3.10, on all servers

    +
  6. +
  7. +

    Ensure that version reflects 3.10.0 in the output of the following command on all servers,

    +
    gluster --version
    +
    +
  8. +
  9. +

    Start glusterd on all the upgraded servers

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    Restart any gfapi based application stopped previously in step (2)

    +
  14. +
+

Post upgrade steps

+

Perform the following steps post upgrading the entire trusted storage pool,

+
    +
  • It is recommended to update the op-version of the cluster. Refer, to the op-version section for further details
  • +
  • Proceed to upgrade the clients to 3.10 version as well
  • +
+

Upgrade procedure for clients

+

Following are the steps to upgrade clients to the 3.10.0 version,

+
    +
  1. Unmount all glusterfs mount points on the client
  2. +
  3. Stop all applications that access the volumes via gfapi (qemu, etc.)
  4. +
  5. Install Gluster 3.10
  6. +
  7. Mount all gluster shares
  8. +
  9. Start any applications that were stopped previously in step (2)
  10. +
diff --git a/Upgrade-Guide/upgrade-to-3.11/index.html b/Upgrade-Guide/upgrade-to-3.11/index.html

Upgrade to 3.11

+ +

Upgrade procedure to Gluster 3.11, from Gluster 3.10.x, and 3.8.x

+

NOTE: Upgrade procedure remains the same as with the 3.10 release

+

Pre-upgrade notes

+
    +
  • Online upgrade is only possible with replicated and distributed replicate volumes
  • +
  • Online upgrade is not supported for dispersed or distributed dispersed volumes
  • +
  • Ensure no configuration changes are done during the upgrade
  • +
  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
  • +
  • Upgrading the servers ahead of the clients is recommended
  • +
  • It is recommended to have the same client and server, major versions running eventually
  • +
+

Online upgrade procedure for servers

+

This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set, are not part of the same server in the trusted storage pool.

+
+

ALERT: If any of your volumes, in the trusted storage pool that is being upgraded, uses disperse or is a pure distributed volume, this procedure is NOT recommended, use the Offline upgrade procedure instead.

+
+

Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 3.11 version:

+
    +
  1. +

    Stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

    +
  4. +
  5. +

    Install Gluster 3.11

    +
  6. +
  7. +

    Ensure that version reflects 3.11.x in the output of,

    +
    gluster --version
    +
    +

    NOTE: x is the minor release number for the release

    +
  8. +
  9. +

    Start glusterd on the upgraded server

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    Self-heal all gluster volumes by running

    +
    for i in `gluster volume list`; do gluster volume heal $i; done
    +
    +
  14. +
  15. +

    Ensure that there is no heal backlog by running the below command for all volumes

    +
    gluster volume heal <volname> info
    +
    +
    +

    NOTE: If there is a heal backlog, wait till the backlog is empty, or the backlog does not have any entries needing a sync to the just upgraded server, before proceeding to upgrade the next server in the pool

    +
    +
  16. +
  17. +

    Restart any gfapi based application stopped previously in step (2)

    +
  18. +
+

Offline upgrade procedure

+

This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

+

Steps to perform an offline upgrade:

+
    +
  1. +

    On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

    +
  4. +
  5. +

    Install Gluster 3.11, on all servers

    +
  6. +
  7. +

    Ensure that version reflects 3.11.x in the output of the following command on all servers,

    +
    gluster --version
    +
    +

    NOTE: x is the minor release number for the release

    +
  8. +
  9. +

    Start glusterd on all the upgraded servers

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    Restart any gfapi based application stopped previously in step (2)

    +
  14. +
+

Post upgrade steps

+

Perform the following steps post upgrading the entire trusted storage pool,

+
    +
  • It is recommended to update the op-version of the cluster. Refer, to the op-version section for further details
  • +
  • Proceed to upgrade the clients to 3.11 version as well
  • +
+

Upgrade procedure for clients

+

Following are the steps to upgrade clients to the 3.11.x version,

+

NOTE: x is the minor release number for the release

+
    +
  1. Unmount all glusterfs mount points on the client
  2. +
  3. Stop all applications that access the volumes via gfapi (qemu, etc.)
  4. +
  5. Install Gluster 3.11
  6. +
  7. Mount all gluster shares
  8. +
  9. Start any applications that were stopped previously in step (2)
  10. +
diff --git a/Upgrade-Guide/upgrade-to-3.12/index.html b/Upgrade-Guide/upgrade-to-3.12/index.html

Upgrade to 3.12

+ +

Upgrade procedure to Gluster 3.12, from Gluster 3.11.x, 3.10.x, and 3.8.x

+
+

NOTE: Upgrade procedure remains the same as with 3.11 and 3.10 releases

+
+

Pre-upgrade notes

+
    +
  • Online upgrade is only possible with replicated and distributed replicate volumes
  • +
  • Online upgrade is not supported for dispersed or distributed dispersed volumes
  • +
  • Ensure no configuration changes are done during the upgrade
  • +
  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
  • +
  • Upgrading the servers ahead of the clients is recommended
  • +
  • It is recommended to have the same client and server, major versions running eventually
  • +
+

Online upgrade procedure for servers

+

This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set, are not part of the same server in the trusted storage pool.

+
+

ALERT: If there are disperse or, pure distributed volumes in the storage pool being upgraded, this procedure is NOT recommended, use the Offline upgrade procedure instead.

+
+

Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 3.12 version:

+
    +
  1. +

    Stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +systemctl stop glustereventsd
    +
    +
  2. +
  3. +

    Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

    +
  4. +
  5. +

    Install Gluster 3.12

    +
  6. +
  7. +

    Ensure that version reflects 3.12.x in the output of,

    +
    gluster --version
    +
    +
    +

    NOTE: x is the minor release number for the release

    +
    +
  8. +
  9. +

    Start glusterd on the upgraded server

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    If the glustereventsd service was previously enabled, it is required to start it using the commands below, or, through other means,

    +
    systemctl start glustereventsd
    +
    +
  14. +
  15. +

    Invoke self-heal on all the gluster volumes by running,

    +
    for i in `gluster volume list`; do gluster volume heal $i; done
    +
    +
  16. +
  17. +

    Verify that there are no heal backlog by running the command for all the volumes,

    +
    gluster volume heal <volname> info
    +
    +
    +

    NOTE: Before proceeding to upgrade the next server in the pool it is recommended to check the heal backlog. If there is a heal backlog, it is recommended to wait until the backlog is empty, or, the backlog does not contain any entries requiring a sync to the just upgraded server.

    +
    +
  18. +
  19. +

    Restart any gfapi based application stopped previously in step (2)

    +
  20. +
+

Offline upgrade procedure

+

This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

+

Steps to perform an offline upgrade:

+
    +
  1. +

    On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd glustereventsd
    +systemctl stop glustereventsd
    +
    +
  2. +
  3. +

    Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

    +
  4. +
  5. +

    Install Gluster 3.12, on all servers

    +
  6. +
  7. +

    Ensure that version reflects 3.12.x in the output of the following command on all servers,

    +
    gluster --version
    +
    +
    +

    NOTE: x is the minor release number for the release

    +
    +
  8. +
  9. +

    Start glusterd on all the upgraded servers

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    If the glustereventsd service was previously enabled, it is required to start it using the commands below, or, through other means,

    +
    systemctl start glustereventsd
    +
    +
  14. +
  15. +

    Restart any gfapi based application stopped previously in step (2)

    +
  16. +
+

Post upgrade steps

+

Perform the following steps post upgrading the entire trusted storage pool,

+
    +
  • It is recommended to update the op-version of the cluster. Refer, to the op-version section for further details
  • +
  • Proceed to upgrade the clients to 3.12 version as well
  • +
+

Upgrade procedure for clients

+

Following are the steps to upgrade clients to the 3.12.x version,

+
+

NOTE: x is the minor release number for the release

+
+
    +
  1. Unmount all glusterfs mount points on the client
  2. +
  3. Stop all applications that access the volumes via gfapi (qemu, etc.)
  4. +
  5. Install Gluster 3.12
  6. +
  7. Mount all gluster shares
  8. +
  9. Start any applications that were stopped previously in step (2)
  10. +

Upgrade to 3.13

+ +

Upgrade procedure to Gluster 3.13, from Gluster 3.12.x, and 3.10.x

+

NOTE: Upgrade procedure remains the same as with 3.12 and 3.10 releases

+

Pre-upgrade notes

+
    +
  • Online upgrade is only possible with replicated and distributed replicate volumes
  • +
  • Online upgrade is not supported for dispersed or distributed dispersed volumes
  • +
  • Ensure no configuration changes are done during the upgrade
  • +
  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
  • +
  • Upgrading the servers ahead of the clients is recommended
  • +
  • It is recommended to have the same client and server, major versions running eventually
  • +
+

Online upgrade procedure for servers

+

This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

+
+

ALERT: If any of your volumes in the trusted storage pool that is being upgraded uses disperse or is a pure distributed volume, this procedure is NOT recommended; use the Offline upgrade procedure instead.

+
+

Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 3.13 version:

+
    +
  1. +

    Stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

    +
  4. +
  5. +

    Install Gluster 3.13

    +
  6. +
  7. +

    Ensure that version reflects 3.13.x in the output of,

    +
    gluster --version
    +
    +

    NOTE: x is the minor release number for the release

    +
  8. +
  9. +

    Start glusterd on the upgraded server

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    Self-heal all gluster volumes by running

    +
    for i in `gluster volume list`; do gluster volume heal $i; done
    +
    +
  14. +
  15. +

    Ensure that there is no heal backlog by running the below command for all volumes

    +
    gluster volume heal <volname> info
    +
    +
    +

    NOTE: If there is a heal backlog, wait till the backlog is empty, or the backlog does not have any entries needing a sync to the just upgraded server, before proceeding to upgrade the next server in the pool

    +
    +
  16. +
  17. +

    Restart any gfapi based application stopped previously in step (2)

    +
  18. +
+

Offline upgrade procedure

+

This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

+

Steps to perform an offline upgrade:

+
    +
  1. +

    On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

    +
  4. +
  5. +

    Install Gluster 3.13, on all servers

    +
  6. +
  7. +

    Ensure that version reflects 3.13.x in the output of the following command on all servers,

    +
    gluster --version
    +
    +

    NOTE: x is the minor release number for the release

    +
  8. +
  9. +

    Start glusterd on all the upgraded servers

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    Restart any gfapi based application stopped previously in step (2)

    +
  14. +
+

Post upgrade steps

+

Perform the following steps post upgrading the entire trusted storage pool,

+
    +
  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
  • +
  • Proceed to upgrade the clients to 3.13 version as well
  • +
+

Upgrade procedure for clients

+

Following are the steps to upgrade clients to the 3.13.x version,

+

NOTE: x is the minor release number for the release

+
    +
  1. Unmount all glusterfs mount points on the client
  2. +
  3. Stop all applications that access the volumes via gfapi (qemu, etc.)
  4. +
  5. Install Gluster 3.13
  6. +
  7. Mount all gluster shares
  8. +
  9. Start any applications that were stopped previously in step (2)
  10. +

Upgrade to 3.5

+ +

Glusterfs upgrade from 3.4.x to 3.5

+

Now that GlusterFS 3.5.0 is out, here are some mechanisms to upgrade from earlier installed versions of GlusterFS.

+

Upgrade from GlusterFS 3.4.x:

+

GlusterFS 3.5.0 is compatible with 3.4.x (yes, you read it right!). You can upgrade your deployment by following one of the two procedures below.

+

a) Scheduling a downtime (Recommended)

+

For this approach, schedule a downtime and prevent all your clients from accessing the servers.

+

If you have quota configured, you need to perform steps 1 and 6; otherwise you can skip them.

+

If you have a geo-replication session running, stop the session using the geo-rep stop command (please refer to step 1 of the geo-rep upgrade steps provided below).

+
    +
  1. Execute "pre-upgrade-script-for-quota.sh" mentioned under "Upgrade Steps For Quota" section.
  2. +
  3. Stop all glusterd, glusterfsd and glusterfs processes on your server.
  4. +
  5. Install GlusterFS 3.5.0
  6. +
  7. Start glusterd.
  8. +
  9. Ensure that all started volumes have processes online in “gluster volume status”.
  10. +
  11. Execute "Post-Upgrade Script" mentioned under "Upgrade Steps For Quota" section.
  12. +
+

You would need to repeat these steps on all servers that form your +trusted storage pool.

+

To upgrade geo-replication session, please refer to geo-rep upgrade +steps provided below (from step 2)

+

After upgrading the servers, it is recommended to upgrade all client +installations to 3.5.0.

+

b) Rolling upgrades with no downtime

+

If you have replicated or distributed replicated volumes with bricks placed in the right fashion for redundancy, have no data to be self-healed and feel adventurous, you can perform a rolling upgrade through the following procedure:

+

NOTE: Rolling upgrade of a geo-replication session from glusterfs version < 3.5 to 3.5.x is not supported.

+

If you have quota configured, you need to perform steps 1 and 7; otherwise you can skip them.

+
    +
  1. Execute "pre-upgrade-script-for-quota.sh" mentioned under "Upgrade Steps For Quota" section.
  2. +
  3. Stop all glusterd, glusterfs and glusterfsd processes on your server.
  4. +
  5. Install GlusterFS 3.5.0.
  6. +
  7. Start glusterd.
  8. +
  9. Run “gluster volume heal <volname> info” on all volumes and ensure that there is nothing left to be self-healed on every volume. If you have pending data for self-heal, run “gluster volume heal <volname>” and wait for self-heal to complete.
  10. +
  11. Ensure that all started volumes have processes online in “gluster volume status”.
  12. +
  13. Execute "Post-Upgrade Script" mentioned under "Upgrade Steps For Quota" section.
  14. +
+

Repeat the above steps on all servers that are part of your trusted +storage pool.

+

Again after upgrading the servers, it is recommended to upgrade all +client installations to 3.5.0.

+

Do report your findings on 3.5.0 in gluster-users, #gluster on Freenode +and bugzilla.

+

Please note that this may not work for all installations and upgrades. If you notice anything amiss and would like to see it covered here, please point it out.

+

Upgrade Steps For Quota

+

The upgrade process for quota involves executing two upgrade scripts:

+
    +
  1. pre-upgrade-script-for-quota.sh, and
  2. +
  3. post-upgrade-script-for-quota.sh
  4. +
+

Pre-Upgrade Script:

+

What it does:

+

The pre-upgrade script (pre-upgrade-script-for-quota.sh) iterates over the list of volumes that have quota enabled and captures the configured quota limits for each such volume in a file under /var/tmp/glusterfs/quota-config-backup/vol_<VOLNAME> by executing the 'quota list' command on each one of them.

+

Pre-requisites for running Pre-Upgrade Script:

+
    +
  1. Make sure glusterd and the brick processes are running on all nodes + in the cluster.
  2. +
  3. The pre-upgrade script must be run prior to upgrading.
  4. +
  5. The pre-upgrade script must be run on only one of the nodes in the + cluster.
  6. +
+

Location:

+

pre-upgrade-script-for-quota.sh must be retrieved from the source tree +under the 'extras' directory.

+

Invocation:

+

Invoke the script by executing `./pre-upgrade-script-for-quota.sh` +from the shell on any one of the nodes in the cluster.

+
    +
  • +

    Example:

    +

    [root@server1 extras]#./pre-upgrade-script-for-quota.sh

    +
  • +
+

Post-Upgrade Script:

+

What it does:

+

The post-upgrade script (post-upgrade-script-for-quota.sh) picks the +volumes that have quota enabled.

+

Because the cluster must be operating at op-version 3 for quota to work, the 'default-soft-limit' for each of these volumes is set to 80% (which is its default value) via a `volume set` operation, as an explicit trigger to bump up the op-version of the cluster and also to trigger a re-write of volfiles, which knocks quota off the client volume file.

+

Once this is done, these volumes are started forcefully using `volume +start force` to launch the Quota Daemon on all the nodes.

+

Thereafter, for each of these volumes, the paths and the limits configured on them are retrieved from the backed up file /var/tmp/glusterfs/quota-config-backup/vol_<VOLNAME> and limits are set on them via the `quota limit-usage` interface.

+
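
For illustration, the `quota limit-usage` interface that the script drives looks like the sketch below; the volume name, directory and size are hypothetical, and the post-upgrade script issues the equivalent commands automatically from the backed up configuration:

# hypothetical example: re-apply a 10GB limit on /projects of volume vol1
gluster volume quota vol1 limit-usage /projects 10GB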

Note:

+

In the new version of quota, the command `quota limit-usage` will fail if the directory on which the quota limit is to be set for a given volume does not exist. Therefore, it is advised that you create these directories first, before running post-upgrade-script-for-quota.sh, if you want limits to be set on these directories.

+

Pre-requisites for running Post-Upgrade Script:

+
    +
  1. The post-upgrade script must be executed after all the nodes in the + cluster have upgraded.
  2. +
  3. Also, all the clients accessing the given volume must also be + upgraded before the script is run.
  4. +
  5. Make sure glusterd and the brick processes are running on all nodes + in the cluster post upgrade.
  6. +
  7. The script must be run from the same node where the pre-upgrade + script was run.
  8. +
+

Location:

+

post-upgrade-script-for-quota.sh can be found under the 'extras' +directory of the source tree for glusterfs.

+

Invocation:

+

post-upgrade-script-for-quota.sh takes one command line argument. This argument could be one of the following: the name of the volume which has quota enabled, or 'all'.

+

In the first case, invoke post-upgrade-script-for-quota.sh from the +shell for each volume with quota enabled, with the name of the volume +passed as an argument in the command-line:

+
    +
  • Example:
  • +
+

For a volume "vol1" on which quota is enabled, invoke the script in the following way:

+
    [root@server1 extras]#./post-upgrade-script-for-quota.sh vol1
+
+

In the second case, the post-upgrade script picks on its own, the +volumes on which quota is enabled, and executes the post-upgrade +procedure on each one of them. In this case, invoke +post-upgrade-script-for-quota.sh from the shell with 'all' passed as an +argument in the command-line:

+
    +
  • +

    Example:

    +

    [root@server1 extras]#./post-upgrade-script-for-quota.sh all

    +
  • +
+

Note:

+

In the second case, post-upgrade-script-for-quota.sh exits prematurely upon failure to upgrade any given volume. In that case, you may run post-upgrade-script-for-quota.sh individually (using the volume name as the command line argument) on this volume and also on all volumes appearing after this volume in the output of `gluster volume list` that have quota enabled.

+
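
If the 'all' invocation stops partway, a small loop such as the sketch below can re-run the script volume by volume. It assumes you are running it from the 'extras' directory and that re-running the script on an already processed volume is acceptable; otherwise restrict the loop to the volumes that were not yet handled:

# re-run the post-upgrade script once per volume in the pool
for v in `gluster volume list`; do
    ./post-upgrade-script-for-quota.sh "$v"
done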

The backed up files under /var/tmp/glusterfs/quota-config-backup/ are +retained after the post-upgrade procedure for reference.

+

Upgrade steps for geo replication:

+

Here are the steps to upgrade your existing geo-replication setup to the new distributed geo-replication in glusterfs-3.5. The new version leverages all the nodes in your master volume and provides better performance.

+

Note:

+

Since the new version of geo-rep is very different from the older one, this has to be done offline.

+

New version supports only syncing between two gluster volumes via +ssh+gluster.

+

This document deals with upgrading geo-rep, so upgrading the volumes is not covered in detail here.

+

Below are the steps to upgrade:

+

1. Stop the geo-replication session in the older version (< 3.5) using the command below

+
    #gluster volume geo-replication `<master_vol>` `<slave_host>`::`<slave_vol>` stop
+
+

2. Since the new geo-replication requires the gfids of the master and slave volumes to be the same, generate a file containing the gfids of all the files in the master

+
    cd /usr/share/glusterfs/scripts/ ;
+    bash generate-gfid-file.sh localhost:`<master_vol>` $PWD/get-gfid.sh    /tmp/master_gfid_file.txt ;
+    scp /tmp/master_gfid_file.txt root@`<slave_host>`:/tmp
+
+

3. Now go to the slave host and apply the gfids to the slave volume.

+
    cd /usr/share/glusterfs/scripts/
+    bash slave-upgrade.sh localhost:`<slave_vol>` /tmp/master_gfid_file.txt    $PWD/gsync-sync-gfid
+
+

This will ask you for the password of all the nodes in the slave cluster. Please provide them, if asked.

+

4. Also note that this will restart your slave gluster volume (stop and start).

+

5. Now create and start the geo-rep session between master and slave. For instructions on creating a new geo-rep session please refer to the distributed-geo-rep admin guide.

+
    gluster volume geo-replication `<master_volume>` `<slave_host>`::`<slave_volume>` create push-pem force
+    gluster volume geo-replication `<master_volume>` `<slave_host>`::`<slave_volume>` start
+
+

6. Now your session is upgraded to use distributed-geo-rep.


GlusterFS upgrade from 3.5.x to 3.6.x

+

Now that GlusterFS 3.6.0 is out, here is the process to upgrade from earlier installed versions of GlusterFS.

+

If you are using GlusterFS replication (< 3.6) in your setup, please note that the new afrv2 implementation is only compatible with 3.6 GlusterFS clients. If you are not updating your clients to GlusterFS version 3.6, you need to disable the client self-heal process. You can do this with the steps below.

+
# gluster v set testvol cluster.entry-self-heal off
+volume set: success
+
+# gluster v set testvol cluster.data-self-heal off
+volume set: success
+
+# gluster v set testvol cluster.metadata-self-heal off
+volume set: success
+
+

GlusterFS upgrade from 3.5.x to 3.6.x

+

a) Scheduling a downtime (Recommended)

+

For this approach, schedule a downtime and prevent all your clients from accessing the servers (umount your volumes, stop gluster volumes, etc.).

+
    +
  1. Stop all glusterd, glusterfsd and glusterfs processes on your server.
  2. +
  3. Install GlusterFS 3.6.0
  4. +
  5. Start glusterd.
  6. +
  7. Ensure that all started volumes have processes online in “gluster volume status”.
  8. +
+

You would need to repeat these steps on all servers that form your +trusted storage pool.

+

After upgrading the servers, it is recommended to upgrade all client +installations to 3.6.0

+

GlusterFS upgrade from 3.4.x to 3.6.x

+

Upgrade from GlusterFS 3.4.x:

+

GlusterFS 3.6.0 is compatible with 3.4.x (yes, you read it right!). You +can upgrade your deployment by following one of the two procedures +below.

+

a) Scheduling a downtime (Recommended)

+

For this approach, schedule a downtime and prevent all your clients from +accessing ( umount your volumes, stop gluster Volumes..etc)the servers.

+

If you have quota configured, you need to perform step 1 and 6, +otherwise you can skip it.

+

If you have geo-replication session running, stop the session using the +geo-rep stop command (please refer to step 1 of geo-rep upgrade steps +provided below)

+
    +
  1. Execute "pre-upgrade-script-for-quota.sh" mentioned under "Upgrade Steps For Quota" section.
  2. +
  3. Stop all glusterd, glusterfsd and glusterfs processes on your server.
  4. +
  5. Install GlusterFS 3.6.0
  6. +
  7. Start glusterd.
  8. +
  9. Ensure that all started volumes have processes online in “gluster volume status”.
  10. +
  11. Execute "Post-Upgrade Script" mentioned under "Upgrade Steps For Quota" section.
  12. +
+

You would need to repeat these steps on all servers that form your +trusted storage pool.

+

To upgrade geo-replication session, please refer to geo-rep upgrade +steps provided below (from step 2)

+

After upgrading the servers, it is recommended to upgrade all client +installations to 3.6.0.

+

Do report your findings on 3.6.0 in gluster-users, #gluster on Freenode +and bugzilla.

+

Please note that this may not work for all installations & upgrades. If +you notice anything amiss and would like to see it covered here, please +point the same.

+

Upgrade Steps For Quota

+

The upgrade process for quota involves executing two upgrade scripts:

+
    +
  1. pre-upgrade-script-for-quota.sh, and\
  2. +
  3. post-upgrade-script-for-quota.sh
  4. +
+

Pre-Upgrade Script:

+

What it does:

+

The pre-upgrade script (pre-upgrade-script-for-quota.sh) iterates over +the list of volumes that have quota enabled and captures the configured +quota limits for each such volume in a file under +/var/tmp/glusterfs/quota-config-backup/vol_\<VOLNAME> by executing +'quota list' command on each one of them.

+

Pre-requisites for running Pre-Upgrade Script:

+
    +
  1. Make sure glusterd and the brick processes are running on all nodes + in the cluster.
  2. +
  3. The pre-upgrade script must be run prior to upgrading.
  4. +
  5. The pre-upgrade script must be run on only one of the nodes in the + cluster.
  6. +
+

Location:

+

pre-upgrade-script-for-quota.sh must be retrieved from the source tree +under the 'extras' directory.

+

Invocation:

+

Invoke the script by executing `./pre-upgrade-script-for-quota.sh` +from the shell on any one of the nodes in the cluster.

+

Example:

+
[root@server1 extras]#./pre-upgrade-script-for-quota.sh
+
+

Post-Upgrade Script:

+

What it does:

+

The post-upgrade script (post-upgrade-script-for-quota.sh) picks the +volumes that have quota enabled.

+

Because the cluster must be operating at op-version 3 for quota to work, +the 'default-soft-limit' for each of these volumes is set to 80% (which +is its default value) via `volume set` operation as an explicit +trigger to bump up the op-version of the cluster and also to trigger a +re-write of volfiles which knocks quota off client volume file.

+

Once this is done, these volumes are started forcefully using `volume +start force` to launch the Quota Daemon on all the nodes.

+

Thereafter, for each of these volumes, the paths and the limits +configured on them are retrieved from the backed up file +/var/tmp/glusterfs/quota-config-backup/vol_\<VOLNAME> and limits are +set on them via the `quota limit-usage` interface.

+

Note:

+

In the new version of quota, the command `quota limit-usage` will fail +if the directory on which quota limit is to be set for a given volume +does not exist. Therefore, it is advised that you create these +directories first before running post-upgrade-script-for-quota.sh if you +want limits to be set on these directories.

+
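
A minimal sketch of preparing such directories, assuming a hypothetical volume vol1 exported by server1 with a quota limit configured on /projects, is to mount the volume and create the path before invoking the post-upgrade script:

# mount the volume and pre-create the directory that carries a quota limit
mount -t glusterfs server1:/vol1 /mnt/vol1
mkdir -p /mnt/vol1/projects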

Pre-requisites for running Post-Upgrade Script:

+
    +
  1. The post-upgrade script must be executed after all the nodes in the + cluster have upgraded.
  2. +
  3. Also, all the clients accessing the given volume must also be + upgraded before the script is run.
  4. +
  5. Make sure glusterd and the brick processes are running on all nodes + in the cluster post upgrade.
  6. +
  7. The script must be run from the same node where the pre-upgrade + script was run.
  8. +
+

Location:

+

post-upgrade-script-for-quota.sh can be found under the 'extras' +directory of the source tree for glusterfs.

+

Invocation:

+

post-upgrade-script-for-quota.sh takes one command line argument. This argument could be one of the following: the name of the volume which has quota enabled, or 'all'.

+

In the first case, invoke post-upgrade-script-for-quota.sh from the +shell for each volume with quota enabled, with the name of the volume +passed as an argument in the command-line:

+

Example:

+

For a volume "vol1" on which quota is enabled, invoke the script in the following way:

+
[root@server1 extras]#./post-upgrade-script-for-quota.sh vol1
+
+

In the second case, the post-upgrade script picks on its own, the +volumes on which quota is enabled, and executes the post-upgrade +procedure on each one of them. In this case, invoke +post-upgrade-script-for-quota.sh from the shell with 'all' passed as an +argument in the command-line:

+

Example:

+
[root@server1 extras]#./post-upgrade-script-for-quota.sh all
+
+

Note:

+

In the second case, post-upgrade-script-for-quota.sh exits prematurely upon failure to upgrade any given volume. In that case, you may run post-upgrade-script-for-quota.sh individually (using the volume name as the command line argument) on this volume and also on all volumes appearing after this volume in the output of `gluster volume list` that have quota enabled.

+

The backed up files under /var/tmp/glusterfs/quota-config-backup/ are +retained after the post-upgrade procedure for reference.

+

Upgrade steps for geo replication:

+

Here are the steps to upgrade your existing geo-replication setup to the new distributed geo-replication in glusterfs-3.5. The new version leverages all the nodes in your master volume and provides better performance.

+

Note:

+

Since new version of geo-rep very much different from the older one, +this has to be done offline.

+

New version supports only syncing between two gluster volumes via +ssh+gluster.

+

This doc deals with upgrading geo-rep. So upgrading the volumes are not +covered in detail here.

+

Below are the steps to upgrade:

+

​1. Stop the geo-replication session in older version ( \< 3.5) using +the below command

+
    # gluster volume geo-replication `<master_vol>` `<slave_host>`::`<slave_vol>` stop
+
+

​2. Now since the new geo-replication requires gfids of master and slave +volume to be same, generate a file containing the gfids of all the files +in master

+
    # cd /usr/share/glusterfs/scripts/ ;
+    # bash generate-gfid-file.sh localhost:`<master_vol>` $PWD/get-gfid.sh    /tmp/master_gfid_file.txt ;
+    # scp /tmp/master_gfid_file.txt root@`<slave_host>`:/tmp
+
+

3. Now go to the slave host and apply the gfids to the slave volume.

+
    # cd /usr/share/glusterfs/scripts/
+    # bash slave-upgrade.sh localhost:`<slave_vol>` /tmp/master_gfid_file.txt    $PWD/gsync-sync-gfid
+
+

This will ask you for password of all the nodes in slave cluster. Please +provide them, if asked.

+

​4. Also note that this will restart your slave gluster volume (stop and +start)

+

5. Now create and start the geo-rep session between master and slave. For instructions on creating a new geo-rep session please refer to the distributed-geo-rep admin guide.

+
    # gluster volume geo-replication `<master_volume>` `<slave_host>`::`<slave_volume>` create push-pem force
+    # gluster volume geo-replication `<master_volume>` `<slave_host>`::`<slave_volume>` start
+
+

​6. Now your session is upgraded to use distributed-geo-rep


GlusterFS upgrade to 3.7.x

+

Now that GlusterFS 3.7.0 is out, here is the process to upgrade from earlier installed versions of GlusterFS. Please read the entire howto before proceeding with an upgrade of your deployment.

+

Pre-upgrade

+

GlusterFS contains the afrv2 implementation from 3.6.0 by default. If you are using GlusterFS replication (< 3.6) in your setup, please note that the new afrv2 implementation is only compatible with 3.6 or greater GlusterFS clients. If you are not updating your clients to GlusterFS version 3.6 along with your servers, you need to disable the client self-heal process before the upgrade. You can do this with the steps below.

+
# gluster v set testvol cluster.entry-self-heal off
+volume set: success
+
+# gluster v set testvol cluster.data-self-heal off
+volume set: success
+
+# gluster v set testvol cluster.metadata-self-heal off
+volume set: success
+
+

GlusterFS upgrade to 3.7.x

+

a) Scheduling a downtime

+

For this approach, schedule a downtime and prevent all your clients from accessing the servers (umount your volumes, stop gluster volumes, etc.).

+
1. Stop all glusterd, glusterfsd and glusterfs processes on your server.
+2. Install  GlusterFS 3.7.0
+3. Start glusterd.
+4. Ensure that all started volumes have processes online in “gluster volume status”.
+
+

You would need to repeat these steps on all servers that form your +trusted storage pool.

+

After upgrading the servers, it is recommended to upgrade all client +installations to 3.7.0.

+

b) Rolling Upgrade

+

If you have replicated or distributed replicated volumes with bricks placed in the right fashion for redundancy, have no data to be self-healed and feel adventurous, you can perform a rolling upgrade through the following procedure:

+
1. Stop all glusterd, glusterfs and glusterfsd processes on your server.
2. Install GlusterFS 3.7.0.
3. Start glusterd.
4. Run “gluster volume heal <volname> info” on all volumes and ensure that there is nothing left to be self-healed on every volume. If you have pending data for self-heal, run “gluster volume heal <volname>” and wait for self-heal to complete.
5. Ensure that all started volumes have processes online in “gluster volume status”.
+
+

Repeat the above steps on all servers that are part of your trusted storage pool.

+

Again after upgrading the servers, it is recommended to upgrade all client installations to 3.7.0.

+

Special notes for upgrading from 3.4.x to 3.7.x

+

If you have quota or geo-replication configured in 3.4.x, please read +below. Else you can skip this section.

+

Architectural changes in Quota & geo-replication were introduced in +Gluster 3.5.0. Hence scheduling a downtime is recommended for upgrading +from 3.4.x to 3.7.x if you have these features enabled.

+

Upgrade Steps For Quota

+

The upgrade process for quota involves the following:

+
    +
  1. Run pre-upgrade-script-for-quota.sh
  2. +
  3. Upgrade to 3.7.0
  4. +
  5. Run post-upgrade-script-for-quota.sh
  6. +
+

More details on the scripts are as under.

+

Pre-Upgrade Script:

+

What it does:

+

The pre-upgrade script (pre-upgrade-script-for-quota.sh) iterates over +the list of volumes that have quota enabled and captures the configured +quota limits for each such volume in a file under +/var/tmp/glusterfs/quota-config-backup/vol_\<VOLNAME> by executing +'quota list' command on each one of them.

+

Pre-requisites for running Pre-Upgrade Script:

+
    +
  1. Make sure glusterd and the brick processes are running on all nodes + in the cluster.
  2. +
  3. The pre-upgrade script must be run prior to upgrading.
  4. +
  5. The pre-upgrade script must be run on only one of the nodes in the + cluster.
  6. +
+

Location:

+

pre-upgrade-script-for-quota.sh must be retrieved from the source tree +under the 'extras' directory.

+

Invocation:

+

Invoke the script by executing `./pre-upgrade-script-for-quota.sh` +from the shell on any one of the nodes in the cluster.

+

Example:

+
[root@server1 extras]#./pre-upgrade-script-for-quota.sh
+
+

Post-Upgrade Script:

+

What it does:

+

The post-upgrade script (post-upgrade-script-for-quota.sh) picks the +volumes that have quota enabled.

+

Because the cluster must be operating at op-version 3 for quota to work, +the 'default-soft-limit' for each of these volumes is set to 80% (which +is its default value) via `volume set` operation as an explicit +trigger to bump up the op-version of the cluster and also to trigger a +re-write of volfiles which knocks quota off client volume file.

+

Once this is done, these volumes are started forcefully using `volume +start force` to launch the Quota Daemon on all the nodes.

+

Thereafter, for each of these volumes, the paths and the limits +configured on them are retrieved from the backed up file +/var/tmp/glusterfs/quota-config-backup/vol_\<VOLNAME> and limits are +set on them via the `quota limit-usage` interface.

+

Note:

+

In the new version of quota, the command `quota limit-usage` will fail +if the directory on which quota limit is to be set for a given volume +does not exist. Therefore, it is advised that you create these +directories first before running post-upgrade-script-for-quota.sh if you +want limits to be set on these directories.

+

Pre-requisites for running Post-Upgrade Script:

+
    +
  1. The post-upgrade script must be executed after all the nodes in the + cluster have upgraded.
  2. +
  3. Also, all the clients accessing the given volume must also be + upgraded before the script is run.
  4. +
  5. Make sure glusterd and the brick processes are running on all nodes + in the cluster post upgrade.
  6. +
  7. The script must be run from the same node where the pre-upgrade + script was run.
  8. +
+

Location:

+

post-upgrade-script-for-quota.sh can be found under the 'extras' +directory of the source tree for glusterfs.

+

Invocation:

+

post-upgrade-script-for-quota.sh takes one command line argument. This argument could be one of the following: the name of the volume which has quota enabled, or 'all'.

+

In the first case, invoke post-upgrade-script-for-quota.sh from the +shell for each volume with quota enabled, with the name of the volume +passed as an argument in the command-line:

+

Example: For a volume "vol1" on which quota is enabled, invoke the script in the following way:

+
[root@server1 extras]#./post-upgrade-script-for-quota.sh vol1
+
+

In the second case, the post-upgrade script picks on its own, the +volumes on which quota is enabled, and executes the post-upgrade +procedure on each one of them. In this case, invoke +post-upgrade-script-for-quota.sh from the shell with 'all' passed as an +argument in the command-line:

+

Example:

+
[root@server1 extras]#./post-upgrade-script-for-quota.sh all
+
+

Note:

+

In the second case, post-upgrade-script-for-quota.sh exits prematurely upon failure to upgrade any given volume. In that case, you may run post-upgrade-script-for-quota.sh individually (using the volume name as the command line argument) on this volume and also on all volumes appearing after this volume in the output of `gluster volume list` that have quota enabled.

+

The backed up files under /var/tmp/glusterfs/quota-config-backup/ are +retained after the post-upgrade procedure for reference.

+

Upgrade steps for geo replication:

+

New version supports only syncing between two gluster volumes via +ssh+gluster.

+

Below are the steps to upgrade.

+

​1. Stop the geo-replication session in older version ( \< 3.5) using +the below command

+
    # gluster volume geo-replication <master_vol> <slave_host>::<slave_vol> stop
+
+

​2. Now since the new geo-replication requires gfids of master and slave +volume to be same, generate a file containing the gfids of all the files +in master

+
    # cd /usr/share/glusterfs/scripts/ ;
+    # bash generate-gfid-file.sh localhost:<master_vol> $PWD/get-gfid.sh    /tmp/master_gfid_file.txt ;
+    # scp /tmp/master_gfid_file.txt root@<slave_host>:/tmp
+
+

​3. Upgrade the slave cluster installation to 3.7.0

+

​4. Now go to the slave host and apply the gfid to the slave volume.

+
    # cd /usr/share/glusterfs/scripts/
+    # bash slave-upgrade.sh localhost:<slave_vol> /tmp/master_gfid_file.txt    $PWD/gsync-sync-gfid
+
+

This will ask you for password of all the nodes in slave cluster. Please +provide them, if asked. Also note that this will restart your slave +gluster volume (stop and start)

+

​5. Upgrade the master cluster to 3.7.0

+

​6. Now create and start the geo-rep session between master and slave. +For instruction on creating new geo-rep session please refer +distributed-geo-rep chapter in admin guide.

+
    # gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> create push-pem force
+    # gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> start
+
+

At this point, your distributed geo-replication should be configured +appropriately.


Upgrade to 3.8

+ +

Upgrade procedure from Gluster 3.7.x

+

Pre-upgrade Notes

+
    +
  • Online upgrade is only possible with replicated and distributed replicate volumes.
  • +
  • Online upgrade is not yet supported for dispersed or distributed dispersed volumes.
  • +
  • Ensure no configuration changes are done during the upgrade.
  • +
  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master.
  • +
  • Upgrading the servers ahead of the clients is recommended.
  • +
  • Upgrade the clients after the servers are upgraded. It is recommended to have the same client and server major versions.
  • +
+

Online Upgrade Procedure for Servers

+

The procedure involves upgrading one server at a time. On every storage server in your trusted storage pool:

+
    +
  • +

    Stop all gluster services using the below command or through your favorite way to stop them.

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  • +
  • +

    If you are using gfapi based applications (qemu, NFS-Ganesha, Samba etc.) on the servers, please stop those applications too.

    +
  • +
  • +

    Install Gluster 3.8

    +
  • +
  • +

    Ensure that version reflects 3.8.x in the output of

    +
    gluster --version
    +
    +
  • +
  • +

    Start glusterd on the upgraded server

    +
    glusterd
    +
    +
  • +
  • +

    Ensure that all gluster processes are online by executing

    +
    gluster volume status
    +
    +
  • +
  • +

    Self-heal all gluster volumes by running

    +
    for i in `gluster volume list`; do gluster volume heal $i; done
    +
    +
  • +
  • +

    Ensure that there is no heal backlog by running the below command for all volumes

    +
    gluster volume heal <volname> info
    +
    +
  • +
  • +

    Restart any gfapi based application stopped previously.

    +
  • +
  • +

    After the upgrade is complete on all servers, run the following command:

    +
    gluster volume set all cluster.op-version 30800
    +
    +
  • +
+

Offline Upgrade Procedure

+

For this procedure, schedule a downtime and prevent all your clients from accessing the servers.

+

On every storage server in your trusted storage pool:

+
    +
  • +

    Stop all gluster services using the below command or through your favorite way to stop them.

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  • +
  • +

    If you are using gfapi based applications (qemu, NFS-Ganesha, Samba etc.) on the servers, please stop those applications too.

    +
  • +
  • +

    Install Gluster 3.8

    +
  • +
  • +

    Ensure that version reflects 3.8.x in the output of

    +
    gluster --version
    +
    +
  • +
  • +

    Start glusterd on the upgraded server

    +
    glusterd
    +
    +
  • +
  • +

    Ensure that all gluster processes are online by executing

    +
    gluster volume status
    +
    +
  • +
  • +

    Restart any gfapi based application stopped previously.

    +
  • +
  • +

    After the upgrade is complete on all servers, run the following command:

    +
    gluster volume set all cluster.op-version 30800
    +
    +
  • +
+

Upgrade Procedure for Clients

+
    +
  • Unmount all glusterfs mount points on the client
  • +
  • Stop applications using gfapi (qemu etc.)
  • +
  • Install Gluster 3.8
  • +
  • Mount all gluster shares
  • +
  • Start applications using libgfapi that were stopped previously
  • +

Upgrade to 3.9

+ +

Upgrade procedure from Gluster 3.8.x and 3.7.x

+

The steps to upgrade to Gluster 3.9 are the same as for upgrading to Gluster 3.8. Please follow the detailed instructions from the 3.8 upgrade guide.

+

Note that there is only a single difference, related to the op-version:

+

After the upgrade is complete on all servers, run the following command:

+
gluster volume set all cluster.op-version 30900
+

Upgrade to 4.0

+ +

Upgrade procedure to Gluster 4.0, from Gluster 3.13.x, 3.12.x, and 3.10.x

+

NOTE: Upgrade procedure remains the same as with 3.12 and 3.10 releases

+

Pre-upgrade notes

+
    +
  • Online upgrade is only possible with replicated and distributed replicate volumes
  • +
  • Online upgrade is not supported for dispersed or distributed dispersed volumes
  • +
  • Ensure no configuration changes are done during the upgrade
  • +
  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
  • +
  • Upgrading the servers ahead of the clients is recommended
  • +
  • It is recommended to have the same client and server, major versions running eventually
  • +
+

Online upgrade procedure for servers

+

This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

+
+

ALERT: If any of your volumes in the trusted storage pool that is being upgraded uses disperse or is a pure distributed volume, this procedure is NOT recommended; use the Offline upgrade procedure instead.

+
+

Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 4.0 version:

+
    +
  1. +

    Stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

    +
  4. +
  5. +

    Install Gluster 4.0

    +
  6. +
  7. +

    Ensure that version reflects 4.0.x in the output of,

    +
    gluster --version
    +
    +

    NOTE: x is the minor release number for the release

    +
  8. +
  9. +

    Start glusterd on the upgraded server

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    Self-heal all gluster volumes by running

    +
    for i in `gluster volume list`; do gluster volume heal $i; done
    +
    +
  14. +
  15. +

    Ensure that there is no heal backlog by running the below command for all volumes

    +
    gluster volume heal <volname> info
    +
    +
    +

    NOTE: If there is a heal backlog, wait till the backlog is empty, or the backlog does not have any entries needing a sync to the just upgraded server, before proceeding to upgrade the next server in the pool

    +
    +
  16. +
  17. +

    Restart any gfapi based application stopped previously in step (2)

    +
  18. +
+

Offline upgrade procedure

+

This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

+

Steps to perform an offline upgrade:

+
    +
  1. +

    On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +
    +
  2. +
  3. +

    Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

    +
  4. +
  5. +

    Install Gluster 4.0, on all servers

    +
  6. +
  7. +

    Ensure that version reflects 4.0.x in the output of the following command on all servers,

    +
    gluster --version
    +
    +

    NOTE: x is the minor release number for the release

    +
  8. +
  9. +

    Start glusterd on all the upgraded servers

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    Restart any gfapi based application stopped previously in step (2)

    +
  14. +
+

Post upgrade steps

+

Perform the following steps post upgrading the entire trusted storage pool,

+
    +
  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
  • +
  • Proceed to upgrade the clients to 4.0 version as well
  • +
  • Post upgrading the clients, for replicate volumes, it is recommended to enable the option fips-mode-rchecksum (gluster volume set <volname> fips-mode-rchecksum on) to turn off the usage of MD5 checksums during healing. This enables running Gluster on FIPS compliant systems. A per-volume loop is sketched after this list.
  • +
+
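
A sketch of applying the fips-mode-rchecksum option across volumes is shown below. The grep filter assumes that `gluster volume info` reports a volume type containing "Replicate" for replicate and distributed replicate volumes; verify the output format on your installation before relying on it:

# enable fips-mode-rchecksum on every volume whose type contains "Replicate"
for v in `gluster volume list`; do
    if gluster volume info $v | grep -q "Replicate"; then
        gluster volume set $v fips-mode-rchecksum on
    fi
done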

Upgrade procedure for clients

+

Following are the steps to upgrade clients to the 4.0.x version,

+

NOTE: x is the minor release number for the release

+
    +
  1. Unmount all glusterfs mount points on the client
  2. +
  3. Stop all applications that access the volumes via gfapi (qemu, etc.)
  4. +
  5. Install Gluster 4.0
  6. +
  7. Mount all gluster shares
  8. +
  9. Start any applications that were stopped previously in step (2)
  10. +

Upgrade to 4.1

+ +

Upgrade procedure to Gluster 4.1, from Gluster 4.0.x, 3.12.x, and 3.10.x

+
+

NOTE: Upgrade procedure remains the same as with 3.12 and 3.10 releases

+
+

Pre-upgrade notes

+
    +
  • Online upgrade is only possible with replicated and distributed replicate volumes
  • +
  • Online upgrade is not supported for dispersed or distributed dispersed volumes
  • +
  • Ensure no configuration changes are done during the upgrade
  • +
  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
  • +
  • Upgrading the servers ahead of the clients is recommended
  • +
  • It is recommended to have the same client and server, major versions running eventually
  • +
+

Online upgrade procedure for servers

+

This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

+
+

ALERT: If there are disperse or pure distributed volumes in the storage pool being upgraded, this procedure is NOT recommended; use the Offline upgrade procedure instead.

+
+

Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 4.1 version:

+
    +
  1. +

    Stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd
    +systemctl stop glustereventsd
    +
    +
  2. +
  3. +

    Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

    +
  4. +
  5. +

    Install Gluster 4.1

    +
  6. +
  7. +

    Ensure that version reflects 4.1.x in the output of,

    +
    gluster --version
    +
    +
  8. +
+
+

NOTE: x is the minor release number for the release

+
+
    +
  1. +

    Start glusterd on the upgraded server

    +
    glusterd
    +
    +
  2. +
  3. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  4. +
  5. +

    If the glustereventsd service was previously enabled, it is required to start it using the commands below, or, through other means,

    +
    systemctl start glustereventsd
    +
    +
  6. +
  7. +

    Invoke self-heal on all the gluster volumes by running,

    +
    for i in `gluster volume list`; do gluster volume heal $i; done
    +
    +
  8. +
  9. +

    Verify that there are no heal backlog by running the command for all the volumes,

    +
    gluster volume heal <volname> info
    +
    +
  10. +
+
+

NOTE: Before proceeding to upgrade the next server in the pool it is recommended to check the heal backlog. If there is a heal backlog, it is recommended to wait until the backlog is empty, or, the backlog does not contain any entries requiring a sync to the just upgraded server.

+
+
    +
  1. Restart any gfapi based application stopped previously in step (2)
  2. +
+

Offline upgrade procedure

+

This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

+

Steps to perform an offline upgrade:

+
    +
  1. +

    On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

    +
    killall glusterfs glusterfsd glusterd glustereventsd
    +systemctl stop glustereventsd
    +
    +
  2. +
  3. +

    Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

    +
  4. +
  5. +

    Install Gluster 4.1, on all servers

    +
  6. +
  7. +

    Ensure that version reflects 4.1.x in the output of the following command on all servers,

    +
    gluster --version
    +
    +
    +

    NOTE: x is the minor release number for the release

    +
    +
  8. +
  9. +

    Start glusterd on all the upgraded servers

    +
    glusterd
    +
    +
  10. +
  11. +

    Ensure that all gluster processes are online by checking the output of,

    +
    gluster volume status
    +
    +
  12. +
  13. +

    If the glustereventsd service was previously enabled, it is required to start it using the commands below, or, through other means,

    +
    systemctl start glustereventsd
    +
    +
  14. +
  15. +

    Restart any gfapi based application stopped previously in step (2)

    +
  16. +
+

Post upgrade steps

+

Perform the following steps post upgrading the entire trusted storage pool,

+
    +
  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
  • +
  • Proceed to upgrade the clients to 4.1 version as well
  • +
  • Post upgrading the clients, for replicate volumes, it is recommended to enable the option gluster volume set <volname> fips-mode-rchecksum on to turn off usage of MD5 checksums during healing. This enables running Gluster on FIPS compliant systems.
  • +
+

Upgrade procedure for clients

+

Following are the steps to upgrade clients to the 4.1.x version,

+
+

NOTE: x is the minor release number for the release

+
+
    +
  1. Unmount all glusterfs mount points on the client
  2. +
  3. Stop all applications that access the volumes via gfapi (qemu, etc.)
  4. +
  5. Install Gluster 4.1
  6. +
  7. Mount all gluster shares
  8. +
  9. Start any applications that were stopped previously in step (2)
  10. +

Upgrade to 5

+ +

Upgrade procedure to Gluster 5, from Gluster 4.1.x, 4.0.x, 3.12.x and 3.10.x

+
+

NOTE: Upgrade procedure remains the same as with 4.1 release

+
+

Refer to the Upgrading to 4.1 guide and follow the documented instructions, replacing 4.1 with 5 wherever it appears in the guide as the version reference.

+

Major issues

+
    +
  1. +

    The following options are removed from the code base and must be unset before an upgrade from releases older than release 4.1.0,

    +
      +
    • features.lock-heal
    • +
    • features.grace-timeout
    • +
    +
  2. +
+

To check if these options are set use,

+
gluster volume info
+
+

and ensure that the above options are not part of the Options Reconfigured: section in the output of all volumes in the cluster.

+
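
A quick way to scan for the removed options, sketched below, is to filter the volume info output; an empty result means there is nothing to unset:

gluster volume info | grep -E 'features.lock-heal|features.grace-timeout'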

If these are set, then unset them using the following commands,

+
# gluster volume reset <volname> <option>
+
+

NOTE: Failure to do the above may result in failure during online upgrades, and the reset of these options to their defaults needs to be done prior to upgrading the cluster.


Upgrade to 6

+ +

Upgrade procedure to Gluster 6, from Gluster 5.x, 4.1.x, and 3.12.x

+

We recommend reading the release notes for 6.0 to be +aware of the features and fixes provided with the release.

+
+

NOTE: Upgrade procedure remains the same as with 4.1.x release

+
+

Refer to the Upgrading to 4.1 guide and follow the documented instructions, replacing 4.1 with 6 wherever it appears in the guide as the version reference.

+

Major issues

+
    +
  1. +

    The following options are removed from the code base and must be unset before an upgrade from releases older than release 4.1.0,

    +
      +
    • features.lock-heal
    • +
    • features.grace-timeout
    • +
    +
  2. +
+

To check if these options are set use,

+
gluster volume info
+
+

and ensure that the above options are not part of the Options Reconfigured: +section in the output of all volumes in the cluster.

+

If these are set, then unset them using the following commands,

+
# gluster volume reset <volname> <option>
+
+
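
For example, for a hypothetical volume named vol1 that still has both options set, the two reset commands would be:

# gluster volume reset vol1 features.lock-heal
# gluster volume reset vol1 features.grace-timeout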

NOTE: Failure to do the above may result in failure during online upgrades, +and the reset of these options to their defaults needs to be done prior to +upgrading the cluster.

+

Deprecated translators and upgrade procedure for volumes using these features

+

With this release of Gluster, the following xlators/features are deprecated and are not available in the distribution specific packages. If any of these xlators or features are in use, refer to the instructions below on the steps needed, pre-upgrade, to plan for an upgrade to this release.

+

Stripe volume

+

The Stripe xlator provided the ability to stripe data across bricks. This functionality was used to create and support files larger than a single brick, and also to provide better disk utilization across large file IO by spreading the IO blocks across bricks and hence physical disks.

+

This functionality is now provided by the shard xlator.

+

There is no in-place upgrade feasible for volumes using the stripe feature, and users are encouraged to migrate their data from existing stripe based volumes to sharded volumes.

+

Tier volume

+

The Tier feature is no longer supported with this release, and there is no replacement for it.

+

Volumes using the existing Tier feature need to be converted to regular volumes +before upgrading to this release.

+

Command reference:

+
volume tier <VOLNAME> detach <start|stop|status|commit|[force]>
+
+
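For instance, detaching the tier from a hypothetical tiered volume named tiervol before the upgrade could look like the following; the detach has to be committed before the volume behaves as a regular volume again:

# Start migrating data off the hot tier.
gluster volume tier tiervol detach start

# Check progress until the detach shows as completed.
gluster volume tier tiervol detach status

# Once completed, remove the tier permanently.
gluster volume tier tiervol detach commit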

Other miscellaneous features

+
    +
  • BD xlator
  • glupy
+

The above translators were not supported in previous versions either, but users still had the option to create volumes using these features. If such volumes are in use, the data needs to be migrated to a new volume that does not use the feature before upgrading the cluster.

diff --git a/Upgrade-Guide/upgrade-to-7/index.html b/Upgrade-Guide/upgrade-to-7/index.html

Upgrade to 7

+ +

Upgrade procedure to Gluster 7, from Gluster 6.x, 5.x, 4.1.x, and 3.12.x

+

We recommend reading the release notes for 7.0 to be +aware of the features and fixes provided with the release.

+
+

NOTE: The upgrade procedure remains the same as for the 4.1.x release.

+
+

Refer to the Upgrading to 4.1 guide and follow the documented instructions, substituting 7 wherever the guide refers to 4.1 as the version.

+
+

NOTE: If you have ever enabled quota on your volumes, then after the upgrade is done you will have to restart all the nodes in the cluster one by one to fix the checksum values in the quota.cksum file under the /var/lib/glusterd/vols/<volname>/ directory. Peers may go into the Peer rejected state while doing so, but once all the nodes have been restarted everything will be back to normal.
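As an illustration, the per-node step could look like the following sketch (assuming systemd-managed glusterd and a placeholder volume myvol with quota enabled); run it on one node at a time and wait for the cluster to settle before moving to the next node:

# On each node, one at a time, after its packages have been upgraded:
systemctl restart glusterd

# Verify that all peers are back in the Connected state.
gluster peer status

# Confirm the quota configuration is intact on the volume.
gluster volume quota myvol list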

+
+

Major issues

+
    +
  1. +

    The following options have been removed from the code base and need to be unset before an upgrade from releases older than release 4.1.0:

    +
      +
    • features.lock-heal
    • features.grace-timeout
+

To check if these options are set, use:

+
gluster volume info
+
+

and ensure that the above options do not appear in the Options Reconfigured: section of the output for any volume in the cluster.

+

If these are set, unset them using the following command for each option:

+
# gluster volume reset <volname> <option>
+
+

NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults before upgrading the cluster.

+

Deprecated translators and upgrade procedure for volumes using these features

+

If you are upgrading from a release prior to release-6, be aware of the deprecated xlators and functionality.

diff --git a/Upgrade-Guide/upgrade-to-8/index.html b/Upgrade-Guide/upgrade-to-8/index.html

Upgrade procedure to Gluster 8, from Gluster 7.x, 6.x and 5.x

+

We recommend reading the release notes for 8.0 to be +aware of the features and fixes provided with the release.

+
+

NOTE: Before following the generic upgrade procedure, check out the "Major issues" section given below.

+

Version 8 introduces changes to the directory structure of changelog files in Gluster geo-replication. Therefore, before upgrading the geo-rep packages, the upgrade script must be executed with the brick path as its argument, as described below:

+
    +
  1. Stop the geo-rep session.
  2. Run the upgrade script with the brick path as the argument. The script can be run in a loop for multiple bricks.
  3. Start the upgrade process (a sketch of the full sequence follows this list). The script updates the existing changelog directory structure and the paths inside the htime files to the new format introduced in version 8. If the script is not executed, the search algorithm used during the history crawl will produce wrong results when upgrading from version 7 or below to version 8 or above.
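A sketch of that sequence, assuming a geo-rep session from primaryvol to secondaryhost::secondaryvol and two bricks on the node; the volume names, host name, brick paths, and in particular the script name and location (shown here as /usr/share/glusterfs/scripts/glusterfs-georep-upgrade.py) are placeholders that depend on your installation and packaging:

# 1. Stop the geo-rep session.
gluster volume geo-replication primaryvol secondaryhost::secondaryvol stop

# 2. Run the changelog upgrade script once per brick on this node.
for brick in /bricks/brick1 /bricks/brick2; do
    python3 /usr/share/glusterfs/scripts/glusterfs-georep-upgrade.py "$brick"
done

# 3. Upgrade the packages, then start the session again.
gluster volume geo-replication primaryvol secondaryhost::secondaryvol start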
+

Refer to the generic upgrade procedure guide and follow the documented instructions.

+

Major issues

+

The following options are removed from the code base and need to be unset before an upgrade from releases older than release 4.1.0:

- features.lock-heal
- features.grace-timeout
+

To check if these options are set, use:

+
gluster volume info
+
+

and ensure that the above options do not appear in the Options Reconfigured: section of the output for any volume in the cluster.

+

If these are set, unset them using the following command for each option:

+
# gluster volume reset <volname> <option>
+
+

Make sure you are not using any of the following deprecated features (a quick check is sketched after this list):

+
- Block device (bd) xlator
- Decompounder feature
- Crypt xlator
- Symlink-cache xlator
- Stripe feature
- Tiering support (tier xlator and changetimerecorder)
- Glupy
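A quick, non-exhaustive way to spot the stripe and tier cases is to scan the volume configuration; treat an empty result as a hint, not proof, that these features are unused:

# Look for volume types or options that mention stripe or tier.
gluster volume info | grep -iE 'stripe|tier'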
+

NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults before upgrading the cluster.

+

Deprecated translators and upgrade procedure for volumes using these features

+

If you are upgrading from a release prior to release-6, be aware of the deprecated xlators and functionality.

diff --git a/Upgrade-Guide/upgrade-to-9/index.html b/Upgrade-Guide/upgrade-to-9/index.html

Upgrade procedure to Gluster 9, from Gluster 8.x, 7.x and 6.x

+

We recommend reading the release notes for 9.0 to be +aware of the features and fixes provided with the release.

+
+

NOTE: Before following the generic upgrade procedure, check out the "Major issues" section given below.

+
+

Refer to the generic upgrade procedure guide and follow the documented instructions.

+

Major issues

+

The following options are removed from the code base and need to be unset before an upgrade from releases older than release 4.1.0:

- features.lock-heal
- features.grace-timeout
+

To check if these options are set, use:

+
gluster volume info
+
+

and ensure that the above options do not appear in the Options Reconfigured: section of the output for any volume in the cluster.

+

If these are set, unset them using the following command for each option:

+
# gluster volume reset <volname> <option>
+
+

Make sure you are not using any of the following deprecated features:

+
- Block device (bd) xlator
- Decompounder feature
- Crypt xlator
- Symlink-cache xlator
- Stripe feature
- Tiering support (tier xlator and changetimerecorder)
- Glupy
+

NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults before upgrading the cluster.

+

Deprecated translators and upgrade procedure for volumes using these features

+

If you are upgrading from a release prior to release-6, be aware of the deprecated xlators and functionality.


Glossary

+

Access Control Lists +: Access Control Lists (ACLs) allow you to assign different permissions +for different users or groups even though they do not correspond to the +original owner or the owning group.

+

Block Storage +: Block special files, or block devices, correspond to devices through which the system moves +data in the form of blocks. These device nodes often represent addressable devices such as +hard disks, CD-ROM drives, or memory regions. GlusterFS requires a filesystem (like XFS) that +supports extended attributes.

+

Brick +: A Brick is the basic unit of storage in GlusterFS, represented by an export directory +on a server in the trusted storage pool. +A brick is expressed by combining a server with an export directory in the following format:

+
SERVER:EXPORT
For example:
myhostname:/exports/myexportdir/
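As an illustration, a hypothetical three-brick replicated volume combines three such server:export pairs (host names and paths are placeholders):

# Each server:/export pair below is one brick of the volume.
gluster volume create myvol replica 3 \
    server1:/exports/brick1 \
    server2:/exports/brick1 \
    server3:/exports/brick1
gluster volume start myvol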

Client +: Any machine that mounts a GlusterFS volume. Any applications that use libgfapi access +mechanism can also be treated as clients in GlusterFS context.

+

Cluster +: A trusted pool of linked computers working together, resembling a single computing resource. +In GlusterFS, a cluster is also referred to as a trusted storage pool.

+

Distributed File System +: A file system that allows multiple clients to concurrently access data which is spread across +servers/bricks in a trusted storage pool. Data sharing among multiple locations is fundamental +to all distributed file systems.

+

Extended Attributes
: Extended file attributes (abbreviated xattr) are a filesystem feature that enables users and programs to associate metadata with files and directories. Gluster stores its own metadata in xattrs.

+

Filesystem +: A method of storing and organizing computer files and their data. +Essentially, it organizes these files into a database for the +storage, organization, manipulation, and retrieval by the computer's +operating system.

+

Source: Wikipedia

+

FUSE +: Filesystem in Userspace (FUSE) is a loadable kernel module for Unix-like +computer operating systems that lets non-privileged users create their +own file systems without editing kernel code. This is achieved by +running file system code in user space while the FUSE module provides +only a "bridge" to the actual kernel interfaces. +Source: Wikipedia

+

GFID +: Each file/directory on a GlusterFS volume has a unique 128-bit number +associated with it called the GFID. This is analogous to inode in a +regular filesystem.
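On the brick backend the GFID is kept in the trusted.gfid extended attribute, so it can be inspected with getfattr directly on a server (the brick path below is a placeholder):

# Print the 128-bit GFID of a file as stored on the brick.
getfattr -n trusted.gfid -e hex /bricks/brick1/path/to/file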

+

glusterd +: The Gluster daemon/service that manages volumes and cluster membership. It is required to +run on all the servers in the trusted storage pool.

+

Geo-Replication
: Geo-replication provides a continuous, asynchronous, and incremental replication service from one site to another over Local Area Networks (LANs), Wide Area Networks (WANs), and across the Internet.

+

InfiniBand
: InfiniBand is a switched fabric computer network communications link used in high-performance computing and enterprise data centers.

+

Metadata
: Metadata is data that provides information about one or more other pieces of data. There is no special metadata storage concept in GlusterFS; the metadata is stored with the file data itself, usually in the form of extended attributes.

+

Namespace +: A namespace is an abstract container or environment created to hold a +logical grouping of unique identifiers or symbols. Each Gluster volume +exposes a single namespace as a POSIX mount point that contains every +file in the cluster.

+

Node +: A server or computer that hosts one or more bricks.

+

N-way Replication +: Local synchronous data replication which is typically deployed across campus +or Amazon Web Services Availability Zones.

+

Petabyte +: A petabyte (derived from the SI prefix peta- ) is a unit of +information equal to one quadrillion (short scale) bytes, or 1000 +terabytes. The unit symbol for the petabyte is PB. The prefix peta- +(P) indicates a power of 1000:

+
1 PB = 1,000,000,000,000,000 B = 1000^5 B = 10^15 B.

The term "pebibyte" (PiB), using a binary prefix, is used for the
corresponding power of 1024.
+
+

Source: Wikipedia

+

POSIX
: Portable Operating System Interface (for Unix) is the name of a family of related standards specified by the IEEE to define the application programming interface (API), along with shell and utility interfaces, for software compatible with variants of the Unix operating system. Gluster exports a POSIX-compatible file system.

+

Quorum +: The configuration of quorum in a trusted storage pool determines the +number of server failures that the trusted storage pool can sustain. +If an additional failure occurs, the trusted storage pool becomes +unavailable.

+

Quota +: Quota allows you to set limits on usage of disk space by directories or +by volumes.
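For example, enabling quota and limiting a directory on a placeholder volume myvol might look like:

# Enable the quota feature, cap a directory at 10 GB, then list the limits.
gluster volume quota myvol enable
gluster volume quota myvol limit-usage /projects 10GB
gluster volume quota myvol list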

+

RAID
: Redundant Array of Inexpensive Disks (RAID) is a technology that provides increased storage reliability through redundancy, combining multiple low-cost, less-reliable disk drive components into a logical unit in which all drives in the array are interdependent.

+

RDMA +: Remote direct memory access (RDMA) is a direct memory access from the +memory of one computer into that of another without involving either +one's operating system. This permits high-throughput, low-latency +networking, which is especially useful in massively parallel computer +clusters

+

Rebalance +: The process of redistributing data in a distributed volume when a +brick is added or removed.
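For example, after adding a brick to a placeholder volume myvol, a rebalance is typically started and monitored like this:

# Redistribute existing data across the new brick layout.
gluster volume rebalance myvol start
gluster volume rebalance myvol status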

+

RRDNS +: Round Robin Domain Name Service (RRDNS) is a method to distribute load +across application servers. It is implemented by creating multiple A +records with the same name and different IP addresses in the zone file +of a DNS server.

+

Samba +: Samba allows file and print sharing between computers running Windows and +computers running Linux. It is an implementation of several services and +protocols including SMB and CIFS.

+

Scale-Up Storage +: Increases the capacity of the storage device in a single dimension. +For example, adding additional disk capacity to an existing trusted storage pool.

+

Scale-Out Storage
: Scale-out systems are designed to scale on both capacity and performance, increasing the capability of a storage device across multiple dimensions. For example, adding more systems of the same size, or adding servers to a trusted storage pool, increases the CPU, disk capacity, and throughput available to the trusted storage pool.

+

Self-Heal
: The self-heal daemon runs in the background, identifies inconsistencies in files/dirs on a replicated or erasure-coded volume, and then resolves or heals them. This healing process is usually required when one or more bricks of a volume go down and then come back up later.
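For example, pending heals on a placeholder replicated volume myvol can be inspected, and a heal triggered explicitly, with:

# List files/directories that still need healing.
gluster volume heal myvol info

# Trigger a heal instead of waiting for the daemon's next pass.
gluster volume heal myvol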

+

Server +: The machine (virtual or bare metal) that hosts the bricks in which data is stored.

+

Split-brain +: A situation where data on two or more bricks in a replicated +volume start to diverge in terms of content or metadata. In this state, +one cannot determine programmatically which set of data is "right" and +which is "wrong".

+

Subvolume +: A brick after being processed by at least one translator.

+

Translator
: Translators (also called xlators) are stackable modules, each with a very specific purpose. Translators are stacked in a hierarchical structure called a graph. A translator receives data from its parent translator, performs the necessary operations, and then passes the data down to its child translator in the hierarchy.

+

Trusted Storage Pool +: A storage pool is a trusted network of storage servers. When you start +the first server, the storage pool consists of that server alone.
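For example, a pool is typically grown from the first server by probing the others (host names are placeholders):

# Run from server1 to add two more servers to the pool.
gluster peer probe server2
gluster peer probe server3

# Confirm pool membership.
gluster pool list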

+

Userspace +: Applications running in user space don’t directly interact with +hardware, instead using the kernel to moderate access. Userspace +applications are generally more portable than applications in kernel +space. Gluster is a user space application.

+

Virtual File System (VFS) +: VFS is a kernel software layer which handles all system calls related to the standard Linux file system. +It provides a common interface to several kinds of file systems.

+

Volume +: A volume is a logical collection of bricks.

+

Vol file +: Vol files or volume (.vol) files are configuration files that determine the behavior of the +Gluster trusted storage pool. It is a textual representation of a +collection of modules (also known as translators) that together implement the +various functions required.

+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/google64817fdc11b2f6b6.html b/google64817fdc11b2f6b6.html new file mode 100644 index 00000000..02ea257f --- /dev/null +++ b/google64817fdc11b2f6b6.html @@ -0,0 +1 @@ +google-site-verification: google64817fdc11b2f6b6.html \ No newline at end of file diff --git a/images/640px-GlusterFS-Architecture.png b/images/640px-GlusterFS-Architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..95f89ec828668be6becbdabf94e46026f994519f GIT binary patch literal 97477 zcmb?jhd71K?@czz=Jz`F zz5l`QaqoS6zgIfrJzlTpdVRuFlpb9spdrA*!n!Q`SXvDW3+EXY7Irs29{eO~wu2D< zfopMJ@jez-Y4oLIn0Y~v*%b?hw!6kOITQL zJXl!2jIgjo6S1(U98;@R#o!O{OynO)W1V6ClTnu!3qQH&_*lmU3+oaY=6~2&$*I(s zA3k-wZ}Gnmf;DU7-370SB`bYj!((K%PV3b*i>Da}W(FH}o*S3W%WF5iV!SV;D6BWG zR{738DMo;7Br-8_Jk)o{b98Ai(sy`#DCw1nR>4(SUh9|{0mbqZ#US(866R?|?DKE# zA5@zaS0`%Ac2?*04c|MRc>zr5kJO&gayBYsm*lri!{8w^0+DPORW8;9kmH$mVHnw}@7gf$rHYqsr0Svao(0mXg}E?uT4#_<@^(VG|u^+;MX6`m89asFL2k zHTUvL9v;@&J`jWV#tLl?7+w3avOD7{y5&{3f8uxSC#jT2Ryk@Xvsq85*w?E zAkEbH`7_py8#g$)xZsz6SiZ^4y$RnXtZlYx+#WtVbI+T+3)6+x^QDQ-?^i_8$S7Fu zhmRg5rKU0`Dd-+uxGO5Uw7!0moIs}`2tSh%pXpg3qnO%j_v{h`J4HxHh=}Jlb9s6B zkgX6?4F0ui*8~IvNbqqtJ;bO(0*Y3M6+07Xqx;Oujj2N_5*%T5Gc&LEE(?Tbsi>rEviXVud!H7>>c$~zPZIxVMvEC9!1|M3@&`=q^oSmJW=5$NP*$g#Zs${Z9FM0ojf%wy*F1qG&~pX@$<`ZTk1V9yp5_}?cbpShsIm5`K@ zGTWOYkF0VGKyp2DcNb0(vSXr*n06Jty~Cr@ynxvdP8nsrjv8Wd}^OfYuI z%gg(m9xNj3yeJYnWian0?2ubs?G=X`PD1eSzMW&?T}W3~SNHidY=QZ`=(Y!jh72o% z#kWZCGghi!blo2KhKz=r^ey#HbAN%BDk2W^CZ0RyCZ&mwVVVAJf=I7Lu7G+zd0W)A z*)g;W?5B|3h%~&AUjxIvxecO$-c`e@s;YLh%i_|;#v=sc>Dox^mq=Q;I!u9qj4!YV z0yo!UJLg4Q76?O$8NTokezG6$INV*ih!<38JIUmO#xJtHRSvK4HTKr>#>Oquu+1+1 zp6??g;eE?*DEa<-iSBGGn!zId+3aWu6BCob`g)0{>tnJ4H2R34>tk)S+q(q6;6_E! zi+2nSQNwq%n5_40G2xAo{CD;Cy>t!+UiB>Aah}6}6iv@U8L{=dPr0wR*JQUowlbz7 z?w*V)Yn&V`Q(X3=XtjI|5yvU4m4U)obn|~7U^gs1DW2}ywfU31Nej44b>4g1Eo915 zI|ua1A>G6~hvztN-*%d7zrtlu+YaB~^H+Y}i?-Rag@1p)J>}rQQ{r^r zlnYsPPg$8*S$vl~;NHD=8+vc<%cv%%r!QhY7t}Gk(N9}H)s=PPnw*uOZD4Nb+pF-g z*D5E?t*pXMj`qr(X1^d}ZhCg^Q%<}uF0RR9g>oiwcI+Z?@>_jmbkuXJl`8CEcELKw ze}&sl=U{tKATl5T+rWEG5GMeOyWyBGGBUDY?!o?_o`gFV9+s9b+$Vh#jCk5Pifg4F zJZMI3x%_uGeDaUu&sPyJFY|bZ`Z2oj@iPcxDsY#l_jQ!N=?PKTF6$1%P&Ap-`;!m*hn;OVK!QFVWu@g z>}a)ob)t?op_3)z27Xh5NdgP>Qgu2kr~(^xd-PP?26G3C=@!i)mj^t`;`Yes6~crG zgYGeYzQkR7kB540+*En^v(uFgC%2)bQ5pJNX_(~BKKO6hFCBQF><(U~r(f9fKigYgT}2|M zIhH3wl~)l6GpHhPl_ZWgAJ_`fP9A)7{~*NsZ^bsL37X^%yjKb<6O)pr+aynUp)Bo* zeN#jrY)oD}o*g%(v|Xz9o1C$GcrZ|G7~MHP16!Y`&AGkZ$xTLx$8wGHZ|x>LS;?!5>k=2L-QC@@ zSqU(k#NfAoox653u;P}=w=hQhPw6MC7Lu`MB+nm3$KiZ}3PI~od!H$0>XGE(mBPZp zg1Jq5s96PC#l8N^d_O!cKh29(j%HZwh{J}CJ?%&hhkn=VyR$n7FQl17(!L>@e1*##;9J98rIYYUguVnrD9W>4$`d9-QtqK#OYgrLswkzAGdYHNMuo{KuG;J$ziT_7i>aU}r_l zb)_`1`)FitZtg=G`N?NRE60DbqdzZvUb|$%%8qJ&P@791J=(ogVGIQycC}@8>&T!1 za04s(%;t2nW%5IH^;oF;FR;R~2%55Q`j;S|Y&KqCa9rrRYT&iZQe9IsDBVOZb3IAH zdAXNG?35>bJfMiyGUUFDmg7NS3ks#mQ{^#Dm%ydZL{4xeOj=Vg$q&iL0fIlDgV+~ zrHJ}X>Gb7?`T0fHQ3&#}r}yl&%V%UxDdq`_vz4~ioV{xi62y+46zNObd)GbRnrU*^%dTm&DgnBmU*E}sPDfwKpQ7el&S=bMx`>qGTQ#odiX&5p{@UyS^|9FoukbELF@i96AG_ar?@M zLcK~%QzVIKwI}`TBhmAS!S{KQh~q)?n+}@fT=fB0_T+~;{O>JA=fjm``tmLD>_a0n z9+_D!(98~oEd5%2vQh7gX^L5%+05cEwk_zjB+@O6rU)8N*6w3a%JFd^LYpk|;c&h0 zUGDmQ94J!pKF!X%u6#*z?J+mrxSm&L^zt2dWsh62$z2&7?5V2v6XgnxEE)WAJjc^V zHfU*QXLfda^6}%x54sJ-x_5UbW4|&N*B`PQ6emcY^hs7^g);>NZtgR(!EWO6aIeBL z*Pb4(ELDGVZ#dG&3HW?_?(Ny*4!p^yRr z(DHQvtZ(gwXG=srx_v6yyw7z01FcU?uK=OIH3F7MSA%A<7ixOy4@|sXXFnp!UJ)(T zCQR>f@9BG7FUuZBU}|bwyEoxoyY+<@Fl7vqYp?Z|waoQ|PN%gI!J9X4Li4^wAFXGS zgJPeZoh7^+4L9Ikg@261U?d{nRe$mD@amjAMX%>&Lslh4#miwy7nR=br#hphm`Twq 
z^mO=~R&muMhH{TxbXwX?`ZKcO@6-SXwqG;VkcLI~lND^);MB_xc)HV=)8TYn&*YX~Wa z$we)^t&Z$W?<|?waSzzwKOT)pvdN-ogP>9EPLqY{hqZ3u6UD+B4GdgPDk~doYXe=> z{;Ll*G@OuUi6R#2tl=6O8c-Z>elHAw>zcNF)~kFG;x5qN(~}~C`Yg=^WRHM=fEpkX zo<#}VHxU2w@*!4&$fJ#bj0|%_!%3*W8o4@W#2ILAgxBw{FZShtP9;>G`8rjA2%X<& zaHZ^WWOTHKnwrvhCtLUvRaI5Yq+^IR@Pj~<6X4=X%gX~FwHGh!;d+^8mX)2|7NC?S z8W9BoMy7HKq&kHJX`v()GotAVZd60bHk%0|yCq5gEF?^W`l!nx4M2{A;0C#^$YEMi zR8$l#a~^B{+M}c3Hc-MkPfJTmIL_2yaxyZRG?m2?fPx7JY^RC#UVd9*yM>LqS!t&M)MQg83D!JiS@}k$bhh0@>W(>0Nos&oOrpoVEK#j z!~=74*)dUm8e0F(;_MqvPz_Ag2bbE8Na7Ba*y=ud^as+gLG0-G82`!@4V0F{83VF4 z*YhnOO-v5UdCfSxxG!(rq2TlH@q1L4w2hpB{)MLJdATR-t5CywANp>c$LhRLytyXa z^#A5=oPoFci>1kO`H2#jkT|2oAXyCz5&-jEzmC#x7#QFO2*AP;NJK(GQNKqILObdA zGcN!IXk$o^wm#YM8n3Obt%XV;X+R06udfde4~KSo%6y8`gj?Ij#s;DnhK&s>(?LG+ zZc;^s`{B-tu*c5zdp42lGB^C7ZvUvSzsbzZ%)~@NLDBi=1N(YiYK*k9#jtrI5I5aQ zLL|jMYmLS}a0Ducii!%CzM|q(c%gR=r#AWzpVj2>Aqh{w8s(&1uG-g9Tryg{_*4EqUvDL<&~j85(kfqLBUP^G$kP`ti@tN5dzEhZn3U zvhR1G@CG-Y4$Fr_`o|ir*e7a!yFdQ;1J`XUmkd%U;elg%10jW2uz+h2HdY0}QJNKd za8OVdkaZvnhlIolI9uUtp9waObL}8FUi{+^Coz}K4cRMLoqh8zFR(hOL9RS&-GFoo z;UCi~e{PDiZDC`xJTdWX;w$z6p0kA}P3;em3Gr*n0eE0JtlN>n;a~Dz{HvngOLRe& z8Y?HFDlt=;NL=o%yc(SqJ^Syx-F;e9i%E|3Bz-(Z$`b`$oie%^x74)9by}_mB-)C6 zpfKHPxb=q0Kd9j6e1xPd6G_1a+?$1eqP2~gnQ17sAo}JhAlZnHv{fC7_CrtPX-Em?6bmwREb!sfEuUpvv2D@IKv^{S0 z*qc{w_C>}$h&hvPY>YOi-qZW?n7?&jNPDqpw>J*~b$?tTSnr5aMNTZe1%LyB~#D^qFOjIk&X4d3o}? zJvrg>h~s*~ZI`vrtHYJ=YS#xUUqnQRN(<+oeytI%)vI_!XK?b9dCk97cx&rtoPka4 z2gfjhG7p31R>~gXfa*vCrMrytahj+14Cms7>!fOJY)q~en_LijiV!N0-RYurg*#{{hcCS1;@lL9+u|yBua4dxt2G}u5w5&! zKR#A$&S|}`(FkkZnf8U-BT$>luq!X&ajkdP2jcOBJ>S~(AUfpRH+%UBZU1aW&%C zjJ+yjzXD~b_hG``yajdEw!eNggDB!?D@&F2{JUjyyLJ{N_&v-l{nGsX)BGQ|cBeHW zrOnyL7b5{jSA5biBP_XhuNPlXHzwd6?FPB`_Pk4$T63l}?$D?0p?cXNU&q-R${T_a zQ8F~1M|%Zn{uk&|=M^?$?5hmHchfeW;4ZDNVK-LFiHxBtc+sqMGVyJsP;9aCO2W?* zGD5m;LTzoGLW1|9-^SWCoW<2zN(Iay;yBX6hw8k_V{2ea=$uEse)T!29C$}3nvS=- zWUdcfJlp`~@5Ks-QS;iPXF{w-KbsW?lyKVQSwf0 z<<98P+Ip$X^ySyZs=(XyRrG6o9`|zeJK&h!^xukTH}53mdeZ%^lk3x1rJ^EkN>#(3 z6W|XMK+S(TJqZ<(PI$O%pDv8~KpAHg2Y-0C<2*o#&B8)OQQ2fX#qEvJGPjnLaU^p4 zaEyQFaBn&Iz2;-cABTN|70t&J%DUA@H#DTe~M&}gxX z!8o*z?NP@|Lq)wCyK`;wPsXkiZHLy>)ZBU60BhR2;b7b-*~t7UT=d!U_-T)#h6>JH zwV?SJ4qmlX$a5EJr{Ci^CpPqc#8{d!yy&M|g7|LyuB(SheSJtJ(by|jB9n4*NXa6U zG&IcY?df(NvYgsfiQ2Eu=APQ`{BrXnO4yj53uv8TZJ_%zS#Qi!#nc`hv@ng!=q%)| z@2sR1Y*>vAKJ5BaC^RM)X5WL@BI%u3b^iEkciOb)l zje5r%d|0%I!B+MN9Goh~^l2(?lT#NShrA>*GO|fNP{OmMM^<<2vPj^$aN%QgT$|Tj zhP%HXvve@Bvr{#E?}=z~&nD&;;N!Zd+{*%}K(dxshv7qD2;7qx8;}4H69bYOAy|H{w`Eq~Kx^LgT%gg(;gJv6p zkB<*OO=O|L^Y_G5K_N6FW6oo(dSJLIi3bpOYiDPEQ3{l__O9mTZ(qN@Un;^me_rBb z*|!^DB{)$)qb3yxr~owe61Z1vZE2{evXxjexk-Y4gY}e(*P;`Tv+6DwQMcwgGE!4r zU0tDZ3#Ab(Nlf%%)zH)=r=hX6KO1y3GuGL1GJ` z6&~*G?X9mjg zlX?9Z9Tn#1kEp0do_a4z+S#EiD+TFO^Yibrut>9p>u6~~>!$XKU*+h{0p! 
zj?AO&;MS#~ps+AAYv>_oXJTUF;6NuQPt7GD+d=FmutGJdKdpO>5Bv;V7MiW+<*`Tm z^!G57SCFHrsj1K)17UA!W|p3jVIA97@^%U=xbR`vZxPOxiyJxp>IMWA$?yA2I<;>8 zMY*W${|Dt__pPy!nUV1o5ko`^BO_x@PEMB3e`jl2!s;Xc@cd8iN3nV4`B|?`Z-F4k ztO5Wk>xYIrShFK!hbkwpO)6_G;&GSv6L_;6g@?PlZ7iC0u~8~nsN7s2#+;p;s@yTG^r>II02!Lp z%UlZfz}>a+3_R7m>wZLw7$5ia^i`ELr%Uv}UVvdL(N6He1L)C-#SwaX&?(pM&r*vw zf!hxn=auETON=7|PTP>fRU8FxZeP*{e|Lkj(pH^n zM1YlE|4bsTw){nUb+D=RqqKW^d%sxqfs)12*Xv8f4Gto(rW1*u9IqkQAn@15s{P)> zLWAj#SiG>Ph*=@l!q%4kgY=y{cW8xNzfxTsV7N?q?$u!V1+4j((4Ix^7T-Z?^$!ka zo+tqiyI;q>RrC3X9e9KL0s`RQ+N~oLMWN9pMMZ2ZEJ`O?>FJVcLcr?+7ty-{93Gfx zM!_Y$QDQrSOAnktaA05}!R3XPo*o`PK6Q?WJcC*hUS74QkJ0B!N=kZ1v>UGAW&sbi zkThEE7~s4|66D8Wpsd`rsGRxwhtAmpqh~BcftLf@IM}1ho=tu~+TWsl2hF`BaAy#$ z;O>v+wN!yX($R@dNl5{Pho0&FpM#%YGfC-FL`jC?X43BwLo6W!?O8kwbxx-2uC3lUC>}EvE%yTz2 zwNG-lQbHB~iD{v5R+2VwoNfKt(qe64al@|@Ni1s&5ByK8P473*QD*1ntWngkagma$ z0{I93B|t;dCg2GS29&PbSKoa6`0;V@wPJ^>Le3YabV7IF0ZGZpBf!%9{{7RZUpHf@ z4GhM?c?0~rxHM>Ez-(bsaKr<}lji$!A_@u|p!9()M$f-M3>7>mfO2e^C%{S!wr)#s zhMl{pW?!d+sO{*pDW@`XWyc#~;^|!oM(p z55&#GgH#+iSz>Z>DbX*k8&hzL$MByIN$>pp{VkVUz#o1qEd29l0@%brSvbe`oJ_02 z$x>dv&F>N^Kmj0)GhYF31)ROd!S-#8Ex+bySy>sV3!I#sUS7wrJA$A6vGB0%NC`Wl z>=|%w*REXyAMUu@S1Ljh$UefY4TZvjAwYeta9@gTp_$toKh3WQ{VT T2wV-}Z&)&q6&|6*pT7J*l->8& literal 0 HcmV?d00001 diff --git a/images/Bugzilla-watching.png b/images/Bugzilla-watching.png new file mode 100644 index 0000000000000000000000000000000000000000..f2be77c5baea0b05dc693d50582ac57ff72fd3fc GIT binary patch literal 11400 zcmb_?WmuHqy6#|+(g=d0C@qbEw2tH`-Hl3vAl;!fNJ%#c2ndLD3rHg&-QCh4EpQ%O z`>boPwf2v5UHiO0_-2@y?|b8k`?>GOUshWDIuee{2j+ z;#|9Y)`AXqem2QC({jiC7LU2+`Ppe=Vq)6IkHL3@yUfn#LWG4*fAnua3J0-pCgg z7MpW(;h~{#HYWu?VVY~0EuiC3x||&?og8d#E-f9bWF}jUb+)&YbLjb{rOhq(q;)6q z8)_yvKA#A}FEs2<%1m|}?df5E`0#fi>$kc{9N4K3^Ld14$3wMAyw;1!=YI@sZEe-n zN28cEV%GWXH=avKka3$0@2w7HO2wDz*1yxLva`fK``ssV_3G7w&8a1&!`^4C1V}4u z>pMa&!^6Xe6ZTV5Ndml#jK9`KitINhY23A+>Dt}*`m;Y~;qC1W|B;fBIfolWv+77= zykJGVVZCNHQm8+es}>v_40m1XPOi0Dz$%+i$dq1KTr{8hb~@V>xVzX%^o!NDEH^jT zad+|P@Q|34)ctgC=p3f5w}6j8)Ts||eJSAF^sk_(&={YX*mF8<3#aqNx!Iee!fKZc zE3YgsKm0Y2b9QzHYcw}EudlC%37KuTx1)G?cucN5AAu)wJ$(2)?%_(^+smb;r9C}8 zRd(y-TqgZLQ=fXid&i<(-TGi)e0*(sdU|0&Nm*I!y7SMJCy|kn!smZ>Q=WLf=XW?g zw#|?q>F@8avfnhHsD%4%F%pb0Gcm2fvq@RC^}aF7DJl|^lee4RMj%qKLv5Jro11+c zzrJh=ND_1wIvBTRVPWYOJh*itiMFfXQ16NUfEdew>&^Nom-%(%vnxAGnV57kNdjOc zH8rP)+jBZjD;bkD&P((2Y?M{ z^+3Cv<*vjS=@kl(BSfmJt9vn~ZZz9O<&6vtsboC@e^HvtC#;YTznF>yzdVEo9&#)dRTh1+T6j7$g5wRBw-Oz;jV5fLvi{=B@` zdV2HQb1gCnkDffzLB(*H4%$z-mRT=#9UmX-xSvM)B~1$o3Sv^h4iYwm2CLl|uXYS~ zY(KLH%k*IJsYUq(1<`n=XJ!r!UW`wfnM6o~u7tKfN*VH6l{Uu9KU3$LgYJ8=SuA5x z`TP5Og%%eVgV~N(*~{c9i;Id%zMpO))maYk3D^o6A-``Z((N_sUaB!50^cr}Ltwp_!<(%_%MA=HgO%DL~7t`qcw} z=j>#2)uu8oE-oU1*m?kF`!jn>1@i~kbg9|M!IV4rDGY146Q0naE|_9%}Vdt z=?ToMX6rk)^TCFGDIG1X+y02*P`(ZxF7E!p!Bn9>wl!nq_mPpK?93z+bMuX!w3bGH zqSzoJI_ct)lGnz@H~DO`0t2y&wu`!jD8M=4Q(wG0T_>gW=ciNWd+TF7j~>OG)?UNJ z#8gvL+uPe~@WsQ!#-3ZBx?yQ)sZnOm%)o$&iwoZ7wl|=HNi|Vr&!ch*(JS%F`B>oH zp@AHghVS1mUB1k!RjIe43LkW=6r6Y)6#4o4A3=zr6mXDLSFe0CgGBn>6z+k~m#M#^ zjIKwbB4c8P!C2T|HRB@-i;CccZJ(iEg#c=B*ob!Vy*JE%FaHvpadWP~M=1@A|ND>X zm5^?3XQ5P2iLpe*#a@XGQaCQXShr3|v`O-^QKh~;80wg*xpXsI`jYs=AG4BQ9T2^k zUkIa^)C#Q@P@eTa!+Hc3(>vmEV>HZ&VlK~iaNM|k^s?tjCx&fdRH-Gsj8ld)u!WY= zqp2aSaHf;Y8(D8=X=_^p!8u>MhKV?ajJuV4qtDp(!!-Zytglf zX;Sj1u%&L49G3UHvLtfIDt30e_j;n^mG5nR zUckb{>@j-SG1o18p5U0$V}wK^F{t$Q^$Q9LP#L`g1K>Rq71qAwV_+G5eSPS-x0V+d zOD*TbN_P_Z?XO+C#>dNhVWBGd+FTp7C2l5+?~{`R9Cu<3lPs;QuyAn3Yn*wC3NkYE z_kImPo`fK_G7oA#dm&e|WGwW>>1SSMw zjuKM24wpp93CV3z5sgcf`JVSU1IS&*b{leDpC0dNYipx2tm@xg4GIdXz7>$Em{l&D 
zsaKN}BjA;t=U4p|LrT#EQSJ0ThwFzW6^F0=+AzD(&lI|37pudq8Su@2q)m)I5lKnR zqWD&J^YJLAb#jm2(e@Q|cjw;X3-`R~QNipWZ`Wnpx}jifY@DN?{ote!V$^iWDlYGq|*?M(R3)>DWerzZzegG>=F z3}_N7iEZy@U&OfL`gx%!JL4ssM0;P36ND{hS<~LXpO%}u_T3xX(b3VQByRMoNqva$ zS^4MBp8+HZ3!l%oh12fd6?&C@AIW@=Mh;0NZ{#FP@vo;*?YR3CPGS4Z$_bZENt9sEgav`(@XY*C^gjhtHmxl{pU4#iKsUTEFKya{-)x3nX zbg*(xqJSgEn23hokb|(p5^PFjqd_YM_ z3Alob4CmXoZ;;YEZ~;cRxw-YD-meOPIRWCjeyu_HY&+ze>j~uK0bRE0{ZUg$05lfe zV`Uc34i2)b+Xn}%^z>{Stw9t#F)eVrlA@wCJ74u$iH__R%c+$Iq)P z@}H(_5=tAsRXgGJk(kkay6$aV-QybVeEDn+PR`gMEbotBz63jJs;IQM(0wl_oZ|yX zr2^*bSdA?|7F*D>`G5H%1bkV!f!ekvC ziBPPhCHB=~G+ht&8S+j0mEOtLA0Io-z3WfRphnH+6txQI-NBHOV8A49$KhW_qDdq> z--`^qGS&QN%Pxv5*-;LYg!ETL?hsM9afAb#)j_7TVmMKoo^;ks0# zaZ;s5QHbFnT3^)-Oux~9HflD#^Uv~%iV=Bv>)@&YlX(ogZW*LiRXK}_UfnIPuGY*} zGAhEQe zRj4H5;^SlGxw*N=Q38U3KYsjJuoFplV@YN4^6@b+G&C(4EioNxZfW79sHv_-p-{u~ zg;#@K-JsJq{KxjBNG>KqxcnflK=ncU25gWE>ZUzQhJ2gd`tjcCEWdR{Rh5deGDpY= z`6zOh>B9&5)X#cSc&SfFvFey$dn}s&;CF>8DJn|Dc2^H(H+l2XtnM}Hk&DWU(>#>( zzBIk3I2af&VXaBnBH=gwYJ6>!%%hDe%K6gA50Bmys$jy*v$L~fV`HC1F%^`Q=yQKo z{M-~sYFeV6r(ET*{W22Ow6E9TbKR-tTVdf&qWxs5G8q9u*oO~<6co=e0Enr5)kWLQ zxPtW4&flDzyA}?vA%OndmH8rjE!ESkDlPW7J_74uy($PKJ43s_9L~1$A z1@Qy~1OT$apqi*WWetMFvb)sH%EDr9W>yVpT2nJYwr^I4Y{oV;fZQWuGM35c!^o0H z#M`^Qz0cNfvnCV9ziMi1q^6@&`8()nXky2cGvu@7<>gKO4tSAHN_g&gjDE{AV)b#)=gfsMp$ zMsbb}<*Loi&o>Iy*l%i^n0y9wUsPl*CU%X>&7=>xXN_t5VsoUuh6QMz6MZ7Vak$3R|9dLdzpXnU;054`ya< zMY8zDpg!28HqiR=J>DRiQCL|^TC?0t*kK7$8XEw1+m2PtqWRDfA$R>)* zLZWT$Fh%T_-XbN`dFGuIC(vAR)mz&*d#QaNqprLhcKHXUM7zIF;CT*8{&_t2>*n`2 z<)ii=lia*{1Zcb#>Ygav^gTB8`AV0g9SHgQH4B%p<%`$FEk;$x-|;zooGoAeGq3f= z+}L;oybr?sCz<3)X*5CHev^_t76LYOJxOWl!4(^@R0akHs0B-G9wBUQ3&>e&suI*s zX{Q&Kl^twvkCa*HtO|Aeupq1--?(!p*)Z9)w4lJUJ%Ztm+hJ-(16s=Jw12?d_kiL> zq6<#I;nwu}UAX>y5&2lcutvvl>4j}s29HJEjkR)p>q$wmYUUTk3Zzi~?*7WjFBR_` zr03&vS|2Tm3cfUoEo6DuYu8(E_0+(s>iCDw+0LZ%rl`2M1|NQQ2gTQMwHM85di9?~ zO;)EH(wRF;#Iu}ggxJ`&U_cL(EakKFuAhi=O<1|C@0IHfhD4{m%X6*0B3CGc&ipinMlg z*t~f&J?Wzug&Xe+xbMY_7Z3q&_uaB#bD2eaQk!_alEHCIhD_eM?gZwos0_sTu4n$9PjpX^KYn-tL;6zuZf6086YAtotF=*{$F z9-cBdp#ZZ8GWS=n=A1WcTE?dnsuV3w5_^L9>4^zV$IC58Mn=~90QcaMv88RB)BAn}PZKZ$kpb8L^wsw+w-ltEWZ0*H(Hc})c-a{=5 zURPj2s{BPWHYR3ydD*Big*}Y@Q1_ae=y=JaCSso%X=Bp#&F6AkoJonbzS=?1c`e)U0sFC^ z9l@5a_@l`6WdCS)i3qvam00z9jN6XiHoY+fBWzHgMmaqbU_KWYm;CY7LEnSG&b+G3 zs$A!83LaEb4P9Ant^1QFXcp+d#Qe8Uj*cwt?-$=0G2ctdY(5dOD45Y~#-TMU z)!Hc#=v3u2sSu{EaDtS|{@FE-E5Ag-MZH}o642TyoS*x%BeRr#0ii-q+(E!7h{G^o zMM6RXwQzLBIZ{hYYwDY8U|?XZu$(x`=&%t0wrW_?B$PnRxLP>p=Z_xo@}BPeVC|q` z@T{L+Tx=dc*GQEeuXRs$N|T)AF&@dhr?gG3PeM%0q*eJF*r<|{60gvWi7HOgl5!=v zr;!xG7CdxxbPZ|eEl<)F_!5JUI^s2Csi}}umJ9Rq@T20dU$^qgCdmbz9$MqE`(w^G zGjWJ@yk2bTn_XM$a2*=U)8JX0OdG!;x*w)NLJ-5ul#0T_z9_&FBhU{|kVh>FSDjYD~ zuiXdtuUV zBWK-be+D>LViSBurC(UCvy)R)bo8#>C-VrhTKQ}}`zqjka;0OdIr;KEyNl5ff zO{*aE0=v^0%PC%+g50^iQITa|wL}tT+a z7m1~BGi=X<4^&Eoh&aVXNAp1S5YFYvHJeYUZYg6=g;eq%?^9n+8VBA3im%+PtgO5| zDQW2&BlPTNF)?KO8=I1nl7)QwJ6+4wxG2oF=4M*3a86F^whuJG|IJ>_n3_onbSiVE zWGJJHcEGl-u;UiFPz0$DR47zPOT@o;BRG@_E}lH}P&gVl(NdzjNn~-49^nzJLFYODQ06MGWO~ zm~IpBEKB^JprFn2&veOf5x%R+9a~w-<9&%3F5l@CBU=UYyf@lhAX0syXHn>*FW7(!?-D8F(^g0gZ6~}G>cs=@@Fa)@#jYEBAILklF`@`55D|zb<0P^ z?B~^q*#ua(MdWd}fa%;{>01IEP1_I(E2PH6#^$%0$FP`-)^X!~*@)p;C*hBmEW2t& z^q}~(H{%&ZnY_F_k`-cA)<63%S7Q&lmis(_oqO!#`rdc${Oq?nq!k!V8!ko%?mm zLbYl?r^2zMuc?F>K2k?;m#qe^pexvh)|fB&~1-3>ve zezn0c=!-mtN35geT$yh81ymSWNG_qa+tC65!$O%dvk?x<*+yKvZ0nV;3ZEP)OrE{I z=2`zf3~185!kHw(u4Li!=z9_G&D4h&KCoqv!KJ4$4K~ra^!JW zNp3C^H@9-OVr69|lmo!Be&x3qEpD5dn(FEb#=l7QPJ6$AE3mgeI6QQy(YDYW_;-G3 zo6KSFnVEE|?Amw*CmY+(wzd^-Qtfi5Jw}a8Wa@MY7r>OYHM9K{0K(`vWU@Ls-GMFT 
zNMtK8TkjPZ`|+u$si{TMde}mMaJOE3E0zYRngCU3`MUoKAu@}=ij$$=8z^R#Y(*z$ zXF#pm8_N_!4D8AMnLwGlEXEopBqRW{3FUpK8Szd14ZV~o;3sdKJ7eB){Viu;U|?-+ zZD6nn1Wma?CwY)qc=W}vKiXLUH(OEa{!hYYE!pjG#&>*t95fgrB)7$+%kghW?Rh;$ zP{Z{0^#$dqmzissn}1QyV_{_MH7TKuUmRqusrXCkxM}7Lh>3~O($anq&aa`sxi;y% z@sF%uQe4a?qw;_Aek*nZ>;eW?2J$)q@;@1+zBE5Sj8tlhA=-$?CSJ5vTiY#nRFsq; zU(GKqB?~xy;Jb1!%|bJ?(M7&9Nsu3i?99HzN0#@5g_B3ad4KfEruB+(iM944NHGHneHjA!#r~4X4Mn;Z~yhYZ)^8gL9o0*y9 z^b4vBC_lVHfl~>&$@!_cI6}o`Yq}nA8l2yvIdQ;SmDABR(La2sIJm5^8u3Bq*W)W!t}v+= z8~y6fPD)Bz(-9Sm<-eJwkB0CNRijZn|EmR#6d8&m+Xb z!ir+ia@w91U3~bn#@T-Hdpd$QSz1D3x`|0q_9aRU0<$yhrkdJ_AHjX)T-5@D&N!$( zuHSmJwUq2`_vQ`0Bu5!{1t#%kRd{%~bml7PNPSlUZm6Y!wZSbf&W49)sHsUGc+jx0 zFcF)GitxAUHxLNbh;z)9)zu$os~a2Ia&q-%!0o6oMs~b{fROaVmhX1Sum9jJMuZDk z`@Ha3S@WTsZ%BS0jp2naBO|kzNFObMGk&bmlOUl4nUr2bV+?elWPW?|t_0rM$Tyf7 zidm_2NeVx#`wOqslA$5C=!kN>z}264Uh{@R9)_1!`lnAEW5P%CL?!C$Gc)kW`lJGfQj4i? zppOD`DJCK!05T&|>LLb;io#O?6xJLq#&a5UuuU2QqJuIG!U`TH=Jw9c2g;|badRkX zX-a-OroG~x8s`HK4-d5>14*+ustEZo9|$9FPWE5Z_TS_*ShlIeB%)<=Cq!}NbDLb! zU}k3S?CY}zW;{(S2n!Ey6AJz9i)B3%fMs>Q|CE!nDt^bc*sz<@AgxjE)88Pv;EZ{< zr`Y(01cN#Pf%EX97og9eBc@&($3Z~rn~NieVMvuHdQXJV5PTGWkk+QK{I$`N)#f0I zzD$`i8>XnRgoNC_6=0l!q5&pxw0SgC2I0|_?VEA=D{6m#Ur0y@*p)fAm{V1tg&aXQ zqoAP3d>*?q<9nyHfP0<@kz)Ow?jH%e%>4gDa=u9O|6{f(L$0lQ|XaxOXCKFpwq63~T?IeJc9wEc=8H`kT6Mk3te z_t1X6HEe6^4HH4SX-F)s=^>Cy04A-htU4&KAp{obbG)LWqHgos<$_l&btS^7-o8Vm zI@psY4r;!^3jO1xA#LY%XcGXc0Z~cb&iEux8J?|9Tt`rR zL@Q@+=fh=i?+E>Ys1Ca2Qy(1Anw)05Nr4~5 zpaPlqM^~3YAPB_PbslJ-YOY0vhT;c_H3*#j+3k)+2jzvD+6!M^S@{M9MUEmP6n#pJ z10ZK*^$58fvIz>h0{acBKLwAu-h-(MYeQY#yF}ffb!Lxn$Lh^3mP33O3!&^3$`HF3 z@tgrO;u{5*ihxz-Pas@@fQvvayt|4`PC?N`F)PLE|B9}^GU9&5Km#cyrQ6vb$GKSq z;`I|X8Z*G!XMc{oLMyD7%*RTDCX&L!QpO)IS%%4RuI;z4$8niHB$ooI zOIv#qf-ffrM-~;F@7ij9?U(Fqxoky{p~Qo40fz~4Q#hTB^z-M`Ug;SbdFmwrHSM5s zwl_A4h>MGt`hdI7FD?=yi`Cab@&S&vMTC-o;0Z=7w1I$%(+-?}ZiL45a5&nC8-Z2v_n&qGvxBD>n;@h`N z-r`Rk)UZ#NxFrNgF)ohW-}VwhqVatHrKo5VbmF)}qr>741*lkH{h)K+^@8DYv=9yb z8eXAbVm{v9?E2oN*4y6#nuM-nV5k-w_4H>eKqJ9Ot3wUP#ws4t*zCMk0u$ht38%xUy5J92wCN>i! 
zrN2c!XxY@Xw9So;npJkp92`4Pm?2!M8DmFmJLNMt*LH5#J$>bd%N z@E$jiZ2%B>BWLF4e}ge>3>SDFDcIVUQ4o^)bGv6!L3b>4po4{vl(w`*<&h=tS z3y99^6x{DCQhu$kyFzb>ojbsT?ZSJ&l9yWo>QS(NH6_NGDY@9+B%nVjONfNrsFJ4+ z4KUTvtO9y~HDH6#f>$j`{Pw~4JDcw3&JbmiUH5NNQ40&ZuMI=CgpdS9;K;;;R;p}y zW#t*LEd6XCYjVZ*J2bXQMa0Kb9BBQu8I)v7<3##AYJ>P?!~X8>tA-oKe*6p6!HKe~ zdHnL!xH?i&*AKAp5guv+g(x;M9EcH6srU$!kdlJ=Mmr9DRAhVr?OTEX`@5nDN4qj) z>B-5-V_JZYnE+gx{p+6#J?{Gu0TH=biy+S?n>G5rNd z-;LR+7#Z6K2kij)!~kG4veSWDU70zo+F7O(uvLVDO(1H{w?@GFtYzPVw8xRbPLBf3v#5-2LAKm6*fBL!kIbNl78J z12Ip-!V;`bSHh%{EaVyx7&wy27-<3!3xuAW0qA#9&5)064Pv99h=-OY$Us0K4UdkJ zZ{q*I$UX05i~o&zF}KYUxhq$_SWXM&*z!-RZHbPU=LG+-`r(SWgoF;9&_*8-yEz(# zEL#TgfJR($yeX*cX{F*Cg__Dp(dw8ec`G2Yz6L_MT5xu5E_Ra*b{qPMAS?ty@%!g+ zdkq&kRw48+IEodMN={CWF|vJNU@t91Xtc->uZW$B>g~uRSPRdTi#4bT{e0Q_6~HI7 z1mFJgP4iPm#&zVYz;6usnEvHC4q8jD>}Tr1 z*GylXS#WziI-|pdoL_nV>vK-d^`P=*U`sOS%}7ugh3>Uyhjql%K*QOb53HE43LIDQ z4v#26jp-5DJ7saWP#TOsJ2^GcTi{^}A^`l}RX|{3JW|kUzgXfi+($YgKF;a)vgK1} z?3^p1ezhpJhve1Ou9Vz7MF#@VE|dflZ|J2ehT1Q>Oh&Sa>sbq$vS%&oYe9kB@Q7_q zK6}7;DWc9vZMCBPSZ(DQ)-IMsM!lz~tQ#vd+g-E9sAHU#Eg@djN`BGh!6Y_B=y$#T z;UxsV{s8NTj#U8<;s}{rOoZajtDjHbwIuxHSAXt+|8@nfx5qv8rZonD!uQIU++Mno z2wj>tSx8geK@Yv(GPkyRFI&y@@_3Zg$zk^P)Yf3!CDpb^ zZtiOS*4^7emLB`wL-&_IJFdZ}^SMWoY}s?**HMad2V<1W@OyImU%7erm%|IqEVagKI4`7Ap|%7 literal 0 HcmV?d00001 diff --git a/images/Distribute.png b/images/Distribute.png new file mode 100644 index 0000000000000000000000000000000000000000..063526374f8ed425518c035ff4277ff1a9425e68 GIT binary patch literal 37440 zcmZ^KWmFtZwC&&?2p$L;+}+*XU4py2y9EgD9w4{{cXxLP?(XhzJKufx=j*i?re~&S zO`obdXP>=yg)7L3Bf;aqgFql8NeK}p5D2^tc-{*O4m@5kt5O30z?_uCg+P@PUypzj zIC}|AClCk$?cX05C_NJ!coD`~QdSgZ1r8gH8evw+Fa!i51WAers<$|AEAtiw`uyL#FC)G!{x@($HCIlVq|CIN8vW!K3rTJAA@&YKY8aW7`Nop zj1qg7F<{GR23?+pS1B{vV~SSD`CJi`F*6%&bm&!mZ_mt@lO)kMfpT@-Jch@h@=wC} zyAqWDje|=l1YX}FocaPyU^X@uBq<3zSXjiyfmv9f;35F02@sl^geU^z>EqDI**evE zwPj@qwN9cmc>mpHDw8rIOIC}`N;I<>U2tgj{dM5+eD^Hc@qCc!xYYbfzqbs5@Rf*o zZ@ty!E?k>C*uzDTkYcBNPItSf&(7Xn6g3JEuxWn}Jtii)hhtV=HZ~RtSR59At$G1E zzg`6}{Ln`xIOH(#82w3nScIF^4j-#X%<<}?e{lvbqsL*f$AUGS$btwbk^idy(VY_g zgg19$FpTAjr*83_|T(;))B!(mX8f_i+e<#2VXCO-P{Nc8! zffRfI3L&I=CG0n1cLL)5N`=-T%A`7$?R%TCwf7`*Yoom%lXyJ98x_%~(*fy-zni)asBS@SlSP3WO5Kwd?Kd7R%F&)E3kq z4jm%|E5xYPM_OHKZPu2*m-JoGLkB$-Z|c%97KD(1`G1DJ=F8?u+dh;?F?jdnE!QS5 z`@Z%!FC=#g(!pUj0AgD%tM2XD0v&$es?0B#kT{UttAUs(;NZ+gMuPCOXH_-)%~^Sq z==x>)EHJOVa2V{SQ)q8T12<}EyliWbi4MDzN3 zO0DX6yt?GN#-%BPXi>8YD`-l_XcTHPTX>&D#d&{faWlzpSb^CT{|(Y?;7?Fe&@nCel<=?4XOVz>mHygPu)EVdR zhQYtgw5oNkMYr=sa4Ye=4+ z|IU}0Yyh{+wl54Vt;-xe}k9UiiZY|BQpgBty9i7!?rvtH{*jt;MQ|(?4vqkb5QV7@R$M~zTT!{a*! 
zVZYX{-BXYvl?8*`u>bD+7;9#yXV3-8z@m4jL7=4Ut2WA$O3LJN{xd!-sDop50>8c< zEhv~=T}?77B0rF*X{gnDb?vwt3v0N z%|1!Yo29QgG%nitM%Gy8{C>usHVsRI;C6nw=XkJOj~9)HhB21>(rA;>*VE%Mj-%0dgH(SdL#ZH>c|5O>L)N z&ScFmHQ`2EHd|Xct+SKbpV$T_M#@{=5PYLTYM94!!wuhg!)^{jXb{k$a&uqhvaD%o zs6<3An{FYXHCkJ_fyDRn%2}m9oI;m`7A!3WP32A?`_a}eSjDb@j)95U;B+uCoiEEgdTW3BE-id>aSje&|FjOUhm;Rg>;!0Cn8$Mek%Ij(Fbw3t~4GTM^t!3KZA4jYR zw{y?!CKV#&x;H{2E)ESkJxiO&a)E;zDUhZ;U;X^}asuXW5?4UVY@t+HMvVl5hvIhp z?fK$r;p&>*N=**)8S>D1X!Xh9-A(RO07}{X{+oc}AGfN$NqK(XwvmBQFi_f0JT@7Z zyVA`|gWjeD!#$G2a$Z zt@^G-=To6I5Q5+Y{1oeTn!6k3)B*Zr8pXvDBS=s=X2{;lSt?Oh_98{nwouX8zoe#o`aiE z;gI_iS-79C4v%JwCUf}d$w*_=@6SNJ43GhMY1lFz5Rsu(qKx*ATm6w(AFnqt(;7dZ z=1-L~|Mh|E|2PBl3UKk<9?6j@-i3=OQx?Cui>ffW?QRgHia7Je3y+dP|_ z4Pr+PnTuR_DxzTYTP@0&nLT*Y^v=ZJ-TuB<$6)wL`K45`*PJO53QD=vwPhgcuJe_c z-&ta{uB5|6TDVNDAU{CQiHNE9pvCO5Yy^J|_wLDXvDTeO_12zo~gI=~37^qI` zECd0ouTKOQH=|OwHJX6GJytvbMosOfl0|yc2WCt6Qrg5A2^v~LM9sO~iZtCg z5o|yZL_&r%17fN(M6bV6`03e(0&2f;f0hJYSc$T#a#5+F;m|knzL$H+{RvgX7kP-IS;5)OF8bq3g!Nh$>zuWIu(?&rWJTl1V#S$)%*HjH=M*G7L>ECNQuTqrpC)_T71c{BhYK0|FK{e9Q{eQuLdkk!G+F z&4*DV%_iMJzvlCR)c&_xtv6INEbK;F&o?(#R)R2Xrg>KqwZ(05wTTGvQ?fm&O4;=J zTKs5Ah;FN8aL&4m*GLA8?O?rVv6988cAq*K2K$VrwwWiOT`bV7~5uBV!nKI?N z)ECpeu6LFmjK_oHQI)Er*f>;8&XPk1!o8u;3DG6Wg9_~}4tJJIPd6Sf;(db&{2$iS z`C(`GXYq#DEL9YtV6#FRpn_=)*zf(l&ySCdy=Bql999Gs-IG_$+{urwAbw6R>8Bp`#JEhe(i#?wujZ8B5{_4PV-`}<$H?Nx55V)&>T_vKkyX({&)6FBS@9CE(qxPZQa) z^olIE=M}uJGHr$-{b|gGA%TiiitZepVn{%Lwmc&bYd+PbP?dev2j?JzIJuC#e?vqU z{qx7>$D11&NTT30mGM}+F__s@ktFskc!j z(Z)vTq!}f=%kgiZG~F~r!vrhji$&qES{k$9j1SiS>ukMnI^g?b6B3vWdV(=&wN5uW z`Ev|oB}gD(KG9gNrk@lolv}LUQ|D{5+bE%W+!Rht$E{a?bLs*iHU2>B1%a5(%8S*$ z*?N+=oz9gshanfWHYpew1co6$!9&%W%HAK(Ybsf+gBmH{GRS?hay7=@AM4wiwIq^B$IP@jxR|(IjibNhkBkTr5drzmaaZE*eC^9>xg1Cz8`7Vml*7r~q~G>$ zEUS%HrBulKi@f_ua@RsKr#nNwnS6yLY8F@0LRpGfiO6CV!t8AL`x`43-S36cq{fJb zZh!31A+t@eUsme0y}weiXBHQKDYRAdxUMuj$HvYrRJaTe_eJ3fCBe(4{^IlF8}AL; zM?G|wC>YT@O&}Kq)=dl^nz+bil$i6q_4*)iXLc`LsnU>qIWZ_b0k8l9*)4=!VLy~6a_2oz<{CZ?UFOQh{s`{)&Rfx^UXMbbbP*AY_jX!K@X{n>5Lq40Q zyNU!lh!-h!a&h*T0>wh5?n<@Mpb*+mT=rkie5?wE51FiMpyd_i8cDqfmZBE zg$(`1N(_SGMHJ(tiTgOxH$L55G_u3e9&nfISMmQS4bfh8$b;{$_V#~k%_cXygKnba zwY9b7(^=py=^*~&Jo3PdW{@y3F}u6Flc-dgJ}gEODZv9k$f5@SCQ>By81C!+irw_| zH2>=*#DgMn93&JJm@D(YE_?s2OPOvIc@;8Zb93_xv#CytbBW1l^8Nk2;C}~G8scP0 z@Bi{EsXQn`W)qodKwa(*g3)fUh65##$>vd%+HG|BuOa@wp8`?kjrj5Qu(3OwU^bb( zmM@O1{Ma*Bq8!NwAGVsZctG@Fj0D^jC7QjAFsCA7TwI*dVD#Bc!KmAY4-DuKBLrVa z@%i>v>FmP)NeBbNU#6rlDd0bb09^r*Dv64a@RvbvsB|*T)|ETyMEv1WZEX(0YK!Rh zggIvd0>~d=0x*4^Q5F{Xy!l*74gagIrV#-qns#<}^z>D<+70#*xqX)*e(y`O)kd*Z z^twf#s_h6Ai_f~f9S9-ivxi#U-0JI8$VlfooDWk)h`Z+lyY)wEvbvofIu)8hAXuVM z1IxwA@q?R-wN~}#r_VYgr>Cc1m^(H)KNM^$2YQ1RTk0F!?DDJV?X`J7L!$JZ>=Aw* zdHL}~LI;@)6*~OV`}(a{z>(>6-Nxa$j)g`>bk_RqxjvazdpS=OfnlZh)F?0|ZU7)f z9=&mugEc_6$zkZWBApxrL2^YW>+N1rQX_9K4;%NWW3&7o*CZV7>gMJeLPA<<$q{)f zhSdWDVkSoU^)-gHwA~}{U%z%ue?C3^^#iYpfdD=eW^MyY+NqKiSlZ`ui!@0{s8TB>7gaNxf>V|LN1?u;-VE3VcW2f z9C8dwhVozBASpQL>mR5=JloYj*gt+Hm4b=L1BZYh;P>^u+7^|T9(AMk7M#sfD#7Q0 zjfsemlad-AAD_2$@oeul{~aYZU~b&BPP?Dqkq3(cj*oox1eqfb59u8zL6Q_bD#wt%9 z`_Jcb?4LjaxA`jvuCMVKw}uPIPQb!?5={*QAjQQ}?ZNSUnQEy@0KL6ClyY=;Ho(Vt z;qZx(!x1qsP_PlP@{x^HDe?vgC@Ap$p+WFJ=kZ=|lDmVp@uK5)QK9k5&#^f;-Fg}S z<%M;Re7ZqTL}xpaI7}j0XhsRqav$}$QSocOh{uv`coDbO>B@S}petboYu|6e3{=n zxOQGRft)32q3PnSO*!AvAC|3Jd7%CnFHWOfTk;X#D}TWk2@oaC->dvc75Kh2cj>MH zs|1t&yZ7Em5)dQ59({HomvnoFc&Bf{C7W<=^xSFZ;x@kBOjhE} zk$@e4z3;9^dk~HmS#3QhA@}{T5xv2zAVGB~9NFAHUT*T)=<={aZ0X@%TEz%AIXr)w z#WlTmxw`!%79gtzS<1D_>9%IAxRc zO*q$Llj3XK71q*}J2P0l?#&ESn+=15gMWVIyneapB;a;E9i;lZB5T&~T}Qs{)ItA` 
zRn=fgtVV(iZa9B;+uh{r#X2*e>c|lo$}bUGg{B!rg%pD9v?^aQ%e?dvs%67{GUI9!HL@a_oxsER?B#0Qq=&#C%4EK+%-xpR+4v6iq)3N*6@y(fU zclk%Rk2w9hZl?`_2+C@)V7Jj{AsPf4q=_WUpPC&XF>q(p^Hy701(?FZ!+a26Ep2}7 zwdyW|5$gpih^t>LB3$lD%#IWPmf zx>|m+Shd{d!L7GJ1RL%^Yrv37FTj#&{YjMBGmDLi&txxMcFW+wFi-E0#j-?Yf9r_T zd#KCT$1|ee&{jyq(sLoWq_!nWZ?4IRKOLMxmm)`l%*@Y64T450Rjcsa?t=?8fDNQ5%bcCbUTt!)nJx5av?E_y5=^3+ z`VHrN3d?Gl=+3boux-Q#O}f}11r>%YECI{o4a>-=Z({Ov)_B9^cD{dfbR{W7aCr&& z532Q3hGLX`=ybZ`QMy^L`>5Px8;6F`?6Z5eA$f_ zWL8=2<}_0x-Pw&gguZ|mI*BPsE9C^+2v1Uky+wPYdRiiI=aZkfVNgI34B02_)dLB6OroBRcylLYq#`4KNBP65yduJ)A&z^_nIEdQ5}9}-%nm(aua7T8>?yHd zaMhYljtwb(LPliKj(pxpg*Rrv1_+CT-Q7B$m-|I^6){9St_Ih$-<$fy3b`n`{wCci zC&~6sLBk0KJ&+oWbiT`VxnN+!j?IXR|8O1i&F!aNw9KcUGHSlFGwKL=7Pxfwku(^! z3kvJ{JYb1&o==DOrEp+p(DG+Irh-RETZ45=I$!q+{Mhi9IC7NCyi`lQ_s1;E%no9d zq0eamGB-LroYVL9F$}hJ^M)2NR44e_HK&IvH&YVkJXEa`(e2z=TZ@X%5$xZfDo1;y zFggY+ST0@!BRXDfxNZ{lQ zBI;w(-`gZcl}^c)+gkDjy4bHg?hF@He1Az)m06*opmaNZ-==bfKYjW{ueKsu%ehIvsF(e16YZsIOfvJi#ON=&KE?Itd9WX7ES6QlP}((xQYyEDD+ zb9boxv@TF}OP8Byg_RQ)@tW)WPKp_gEFz9qsy1Na>B-m!w0Y=; zxW8O)m7Sffrly7(-bb=}B~PKB=61A;<7{2Af2Ij~ zc1l7*99O%%{ql4p){cqDWxnpiF`9Q&>udjDZ#P%$kk+VKi0Jorw}^zxmI<&jQb|-z zCVl!KLPEl><>eOX6gmJZmn-In#>61U430i*1K1~~SG$kJUV4!)TF!dz&-4_*yY*CM zq~L>UTXmuTPhRRm6aIE@_S;Y1sax^D)ihdl=Cg&eXDf{#zbc4wsu*yR9*^iZ9v{2L zGxmrIyQilaNlEwSMW2B7U~qD>%4VgZ5K6n+xl5lSxLHUPB20mzuCcL7 zr`hr0d=2K)S92qypm_-B8wdAD0g2$LZf13Y_0G4m>o<=TDIeAga;%7MtY5kOwy$@M zNn-ic))>7v2b0{+hnG7s{P#z*FqgKAsr2odl#!}K5rS2VgZ=%wZSD?0r-ebKV!qaL zm(A-iSnV67q+#n6v{<`g*3S% zIdN`@&o<+W8MNBycuQxs($Uo&*CUQ1Q!isJ*R0*W+{#i^Eb)9(XEFL=5?~@o2x4b% z#ODo_Nn;Wv{v6pKf4xZaEx2;EX=SDg?6S{Yxt-~hI!IAlA)!6Q+&wP#=*+Zr*S)+x zf0VnAKN(p9e>vwZ;Qzb;tNafa{L*5DW~NEQwfJ~=x&T<{>gtNZW%p${7;g;^6_0Ue zwdAl{OJ}oAg$AKGp8QpG+7|KDOo}idWQrG$FE29NLUX9sV)5(fU#OV5b!K8^?VdQN z_JrruMEN3VyG+9Et&y23p{2<`|S-HNx z4iJxL>+P)-6%|ja#wMh17k&?br0cgQWnQ5*^$V=d7OCDcp!P<6?mH=&kUK|ka94%) zd(+8MIkqMRL>mEt>*K37hOaq#wTuv%{N6|KpWp+xAD%4(F;5S27}G@^sj1}n$J5v zTRbF4jAHBsnA5KFr6x~-$9D{7(LJM)S`9QsQ;=6zyT|j6dU`%P1D2IxZ>r(%b1H^^ z88UvrexJ-vubcjmDRKxXN8WgL{3&fI#2(1~^C8oq?zf7K@oWi`kJR zYAhMot)6H--=(hveigznshnsy>~u>>s&pj^U{osp{veP)x7%ipU$k7h^IU(y&BKQL z{lf!2ObgNE_~pjgU=04)YCAbO9)rp3CQ2-Rg7drm*LE0Xj_#$fGZXbMbzx@Zax~F%;na;l&1J&Niy2g<{>q~t1!j@ zmSS*aQ%C7njh)T2#cpd&p@DsFm;V_N`fyqp7yUO?#~uCtPbdVPJUGgI8nVcDH4N=n zp-j=Y7SDPbnxCEKagM7^I=Lw3IDj}amTJ)KRJ41LlT^^rZ)Y9id$wE;z!Kc3fqwyS zu>0D(+nmV5!{d2-I9;jBO;1nn=H|wiW9O?_Sii1b`L)(s^YU+Xi)=>MT6^sC2RtrA za!4pru7U95Irrns#m?LFS{l3E{%=X;4(GKlfXqKXTbK3H&{*1%_*)Zi2u{@EXsjJI zVhbQPAFOE*tcCf|$VBN{DYvlSe0?~Iq2{IS+O zkLQ_xV&}R#8aCxnz#q>!oUUot>IYmuY?uIS88-BMm0cGvJ|%W#=Sob9v_ zzjLt^@i22cyDh9(RL%QA&QBQR$CXBDKq2;dVS@i&4^V;c+Mat6475?@O8tL3+B<|{ zDng*FYBtcJ{9cJVJ(Oe9rRItaPUodp3ZvGY2YsJJeINfkUhmCq_$K6V5UG{KYDEPG zG2^ncrK+vZ9mRHb;t6)`jh19uPFqgT6kPvd2#^Y~TwwWx^V|0C<()w9;j~(HbsMwU zjOnCWCg(S;4xW|8MFEsjMk7Z~Gi1pgf3S2)Wg0+QNcm1@AERk(ysPK;=5o2&otTK8 zB(-;N@W9#WtW;9x^>8j6_Nn4~dy5G1=MG2LJ?$UKtzEJiVzN0Md&{f~i|q1v+di7j zSI z&)^JpC_wK40tl%7;Gh^7Nl6i761#5jNdAw+a1ol(o_z6Fv&l#82IJ+w3|x34VoLem}nwgk_ zuz-1>-K$?b`iGNKIFN$^)w7bt5D9F39xDAO%{~VL9)g|0e#hqi0_hSuIz&*Lw`4Sq z3Et=KgDZ_-LaK;Hw<_avoxR_ZR1-8u&J?UkI<0CAmPwhDX4GoGk)WLqV}Cms1xdPQ za+Cjz{KoA%=Mcu@-bKLF$HkkqiRtBg9o*`_ugo&Wc=G6Rn{^%&F=$S|j*#w4E=}`dAf|5b-j*^lGiNK59UY+` zZ!vx+$smx}&#AtCQZmxd%0>Hy1*Fl~xoqqxGzc#bW*zQviHRZ;*%f2Ml7H$=u%YR6 zQO}mX9Zd^+ooNAIq-1VB^2gwKwy3{IF1$%s)}1HW0D!{r7PBgfijwM;zcCQ;INjs` z=3oEOettZogppBFK8FkjYOi#@FAxeI^o!zeo#g%DXTN*vy`8~f;G&gAGd3&JR9@|? 
zTZ`;{GRXz=;7xrCdwWwIote0A7U$=4ao>p3eeuft_J2wg2?xQHfpzKo_ebtEH}}zZ z?uv+!@p{D9StuvRAhY_mnJA$ej3^fc`F$j1a>M~h`EO&@LffuRc6YZ@+z)_UZW@Nt z)YO#E;ada5x|IeSysuvi<+9S{8N0&1U{SANXQ8?^ZfX3{H>$T>I$Jil5E}j-7LC{4 z`^{siW~1ZKN>Wnx#p9`zY~xn>uf|q*ipgE&(R?Y)fh!F3r-#c+iqv0q*`cdCq z{(2%HP=JDr^6knxTri64EFgFCdR*^5-5fNJ7mt*-ueZ2Z2?>E8wC!!~=U%zvyQh-T zpc0^fRqDpCG(wH1e*pn536Y+O)ZB8-1aOeJxao4;>iyXn03s9U0|>izjVL%^D^yTn zs|z7nkhB{la9J3}vdWxY3c$M7v(xx^*@4<=l~Sk2Um?^{z`6sjVx z(^Bc|h&!o?>_08nY$pCpjam&t!5ioH(PHs!J8r$W2B&;|y90#M1adaNcS`McE?f{S z_{_`?i&@8&`dKi*grwbAxwXu?wblwwC-RLPCL{#z^vt4?%61(+G!*_|T=^4zIT(nK zfXij(>WRTHcpgyXpKg+>u1SFR(gS&9p2N$c-d_4~*(Aacr|rpUe@H-Pu-kEY-Ji~v zso#0}FgDWj@%g?z-|gq!@RF9Nq^K9kWsUq{*Bn{g-ILxABlzQ%Ti0Un?5x`{-9kUn zw$|dru5D9(^nfJ0wP!Oj=9iF$o%DKZy0Yh#%TVNEAsQq>V)#>x2Z~KzsDKnUScu|| z!k=;png7-Pc&;R0JjT<*Bk4>)-rfBXXwDGchDmUKisi?|Oai?#f^6YzZjro4yO&jy z2{y;hTvZOHfcEy%H{wScgbemkiR(MG_*9AgT@*OD-^0sGE3=t~n|xfH)1JIDCmYqM z*wESh2ldt^><^PGe;MXW6@la``%=V&2-wdqEo%dX#v}m+cey@_*^G)#2ZhzTxJr3Y zqp{Lq1bv>m`6m<84?w@4Eu@qgR6@_QX!26m9|_J=SZUx=Zhv|?`FIkqaViRCegcX& z8tRZ@{`IwtEU7b>^HKcemT-27N+x!T-v;_B3=7hJU>7<*7^u5D{-RU;_M}L&h6Fbv zTEG5}!sFv32`MSB=WX%t5o4LF(2H=bifkqmDb4WE#=yL;72MV@Qf0VQs^HI&QHndu zlSj-4EY`E*>+@Sy*|X{m^-{4EB<>bFWaEI6qFel(zQX*^AB8C~Ue^zxZ5WF7>)An& z+5p>*uK<86K-K})xf0D<)3mTqfQ+c4nAT^(i4x0KE3;Xu*(+5o1HMEyUL+UE)|HJY zrFMUyBvC{>0WoNyA|cIB5(Ti?X3M1xr?4;6;K%rM9^j&)N+2LCtcD20|59KW9u97X zs;kM1Y|b1>D#7DsVzW&=+n`@+l(#k;GA>NptE}p!I_&lkD-cjt-ezriNNf zo>s*=AS9BX3K&U!K>JMNZ6m1LiHeKM>U;!!kF+yQ17=7?o7>Z2k`;TzF-D3S3ou>V z*|5BDnnEd`*P*O%S7xl(K^y=_eS0(;gU7Ab>Fe_d${)hV#|OG12L}gH(#NYo0xVjs zTdIk+SV$5$N6!4|HtBA_i*@U)Ruoxu^qPkC6`kQr^MiFQ3b>V(SpINMvsP47I7WXW zk)8n$?@@uYlkYp{!ItoQcTl|F2L>RI7tEu66Dtl8N;c3xm>9F`pv*O4dZ+;jyxg~p zXP6vJBH*&ItaeV?EvsA{rY=?_3}aVduC*i`pS@us=-P|EozNd;az;4psNC#RlA<;| zey^W4Pn04B`fYnhN6VSt!!;(Owtza|akCGQ)mCFvmDVspQDQsfe!`4<9l}3RaCj|^ z+cC3f&i1%i@iQngv@m{5XE*njsa65#zasAxUGaC8ktS$pMDd*8p}QmRVN(zyX-Si& zchiy-_Ho0{EThp}vzUVcyXop>kudmtt#uI?2LL-X0CYn+&Y}0y6wBGkiMF|vo7)2} zyY1`ktbBh43}23mW1e0IN&WJo8WlNALBVIzbys@&2<^$ZNtgh_3j$H1BzO#-hCWtY zsU;*KSSq#O%F4s#<%^vA`+2*@GviTIjGuZF!@nbc`4Zd>0tcHbGt+6~IC#8bK}LQC zusnz`spius8Do+d5FEOh6jVj(cmZZn6gJu(>OZ zcI@U;n<;v}Usx?w(#9%TMB^a@tgWpjB!(ukxWhi-c|Kk5d50G$9Sy~O>+R|B_VF1n zI>L$wfm&&)-vaC#=EeSbAW$~%&fSsGYF2i=h1&zb8l8g$egB&SQNZE=xDLF1E&7-CMtj7fP7fy4 zft8%8RNZJMLY!O%NlxBkAl#cw8{g^%wJL*>1b?`XvTeWnx?umu(1#txm69OHC!)!a_tLR&};8wBD+= znKK>=>nD{WR~u@F2tZlMXL4Kt%N3v;IwzS-u*bw7NQgj>uBWNcXD`DQ{fI$t8zw;# z@Gr?x6#&8jIP!?2@7EEB$GLtnS@?ge+-kEoqIF!S*2l96y9b8f#u`jm(;IRT^{Oz@ zMyD!8l{G>2TC{NzGQ|Eb->9gL7xn!9aU($W0v;n!S{(PJfHk64uE7YTjfX47UF{)y z9-h~k0_hk6{tl;uXsLqhySuxGhl{zA+3USgG*r|kVQ+8m{qc-Nz!|1AkTYt0`b!W1 zjUj@9YCw+cGrXU`>#or5fkJSUT1_g|<@Nzci?!B*@bI?hJ0t+tC6KpCrvztl9r5r; zwe|qEW>!mVxl9x^NCre2y=>(?b8I#lpWEWcK>}I;A9HZc3=i)f%@SK#kufNIqo`La z(Ml>+3R$m{OpX=sbe}?nbNc82jP?9>-#>vgv}?bKmk$1{#$oJfvC+;-!nnC6y_g}* z{T`#$xPX=8r2otGT&Mr}j>*q2%fmS^1D6+YnLay@20+*>*G-3uSgW94oNC7DOw?Ix z8fOCrsN8A7@L=6`t-@KNS(KQ1R%U&FF)EXd((yffD!}FS`J0&kQl{9dvmu`!dvDUm zIgb~1!{(4dJ0^df34~zY>0@H7GK`&0yNb860z+YdC~SyMC#$hQ&bqQ%!yzj)(bR4Y z39<{p2eZl4>_AnE7Beby72bwsyTi(bnaoOyjd(|>`)D#PU_N1yjlp3(e%SDPxVk!8 zZN35U$Y2aXPF1__N=h*IrQgTP2H@-Wxma&MJT&L#Zsh|;^n7gv@PiBvhLH>d<^^W6 z3Hv;Qxne~?x7#vZN*-wM=4=5v@7Fu^h5hlGAQ;l4BawPbz1H#<-@_(v^a?~==%>0gfdngclfZ<(cKdG4ZXjqXJ*Q9 zaWLq%9uNHVXn#qR3Q;L#&!khYg0HQe-s;_kfa%-cP%ja3IWJF;+Ti-Mu5*F8s`6aR zgz7HK`I8fOv7&UTu&QSuz~C}Ski+}IEWbKrGA~nR`0Yv0h+uG7Fo;9Rr7LMO1V&G>*mRB{&-K?8%Pv@N*@v&yt}`zR`uNn;G@C7z`z3psFJST?S?u4 z`{6rK+5t}E4cK}REGiFA>zpDzp~i>^xTf_%>0;h|)Ny|bnHA81Jco(tp>0%D@L7n+ 
z%HYb=$}}B13fkct6bYOmBQ;9f_YMZ2PBa^w}VHRvb?}9b>%>?t(@t@)qUZ8Z>1VID7Wg$P^_{77 zyt9>!-C|q4t%Si2cH7nS~0;Hme|kJy?!=HBlCmeT%GSDN;sm$(tn$2%e}tJ zlnCy(Qp4sU=pkXaTR%JFC`MfzBfcm^^u0$zt48nOmK5Oqf%Ne&#RCurJ|r;mkR~d$ zD^a>KB(w3h+{!{7OkGq{5Cz5as5mI|^7fYMDr97XciOx@!EDTS&FHtZi99AuYU2({0ArtVqo@&%<=^kbcnJp*8 z$M^L1BIEP?Em0;{S6>9;K^Pnu6odrb3y1|A{R(q`nGcXk74xAR<9Tc)8TX*^W88oR z5cf?SFcCi-&sza04zSNpPE0`8e#X?1)`Od7bnvs=m@S-iFA)l`$}gXyswL3n#zbdX znx3xSePG-;C{7KYOElhy4!XqrYnN&ap1sj&c%W2r_N!!7EG61DY5w`@%n|kf zz(p)<1Fi<3V>BMOsfo#agUt%SJ}s1~SGk@oqlQDOPb`9ZYXBGgSOFCNJg)cN3vCK* zxMGa+u)KH1D|o@>`W_aN6`QR?naWyod?Xdj%iGv17-H$xVy)9%3 zLDzpz<3FtEcLCi*~ z(;!mohQ^QV}R zCT}l^WJLeAp>t4s``aa;_aou%@9Y5LC&AZsh40*bu(@4Z`If8}Ry)QOvzuZy)}=}4 zzrJ-wRjB2wwR_%4S$JMYLXVg`mmj{(S7GW#ESq+9=i^+OTwPrOMoUV*>hD#^h=_== zupFMZ<^Vz)(}TFlxQ)|;X#1l7WCIV%m3MXeEJWb!4o*apFkD_y@%HuxG|fi`2WI!P zt({Q&vsRzn4Jz6`Pe=9ypf*w=G)KGp1Ns#aw5}C@e0{iUx`@*=dK83+kC`@+#gheKl4%V*_t0vp+rKkcCf3*>)3*iB4m|B&%=)z;Jm zTy7uaTJ09( zpdpE$lJ346t%_a&6n;arvR?>3+-BC-2D{f*jQ2kWp3e1U*0~4C2`fWq3y`3XmS?S+ zs4cMc69-G6lJX!=%0m6h=5!zSHaYZa*J-pGKrH3M=r-t5 zj}_g~+0$Runz0!TQkALMd_#vH=`MhS3?!3H2ewn?0q$kM99{z~f_hJn_O1snSC~(A ziWGl`hDboEyerqQ9*m;{$znFfrg>T11%QiwOnql)Fc2!jXz(`;Zm-X3nb@z>MN^+;!vsK+NKn4h*bYSfDu{_dZ&O0$JGqnVraT zFfcf>S%Iee-lot;yZ#gcbS~Sw|BgiZ=c4kTVLB5d#vV2|fImJhjkAD<0gxyh3)+;) zE23oMEY!+}-rrvq%c>$oXmTdozyo-U@Bj^PDI`2p#&f^0r*$E2$NH^=~c85R-!OJwak`kBPHk( zia2=cx9lQpAkEyc!3WU}q((A^P$ zDL~)(JdXkX%h~^q-rk^>jrsYc$%8T#sw}ybtgMRJqs*)<{unw+>LjVoH&^G6cc1&S zl}_s=dFd3()6*fbDD<_KqvyL>^bMGv_-(W&OgecKFnKdtNCwr0C(ku&rHs zqBNCx|AmPuev;PJ_8H^XZvMI;o&5nU0>Vm@!w&FcfWf1or5)YPK6Y*ac1bj~v_xYw z<^dVHe?vBSYCglw?$ym@A$u?}_jIj}W^|+ikH>PJ5uGi4kby?9BS|l~*~>Fb`)D@i$=OESWcfq zo0xHW9IiS$zvpAsz+gxDj--r@`*lLd<8t(c;?c1&)Nm2F{|>+t;L{7NYJf8a65jCW zXqn^Q2q07Bh^L@~fdo<5*w}Uke7J2_8(IE|!25^O`OKy~UsxjuB>|Iyy~`dqckRN0 zrp(;hUiBLw0m|DG%%Xtdud+zQF&2b84s#hCiOBEOZ(?$&qu#J2@FT`L3 zeKZKBADMt`GS$EX&~d{E61TQqX|;c|;N?{DkjqCj*d(lXPBzw?c;3xGL+xEZ`J>2W znm5~Kt?K{SxzS50RU#E{#}^eRzdN&XI*OMdGb*p3{`;39>{G#Fu^?DiBvu;~0!>d3 zq$lrOiP2X^q4~{+i)AR z+Aq>X=w|_jls_Vj}1SACM?(S|WDQS@I?(UZEl#(tf>6Y$plrCwIZt2;4@67N^ zFHt$?*|FAL%UUB3Cb>vWXhoPS4sH*{w$y_8_TtA?<1TZj1<*9!r(_{38~6WAr@g`rYBg^AHLv?CPibi_=}0O*E%)<$y((^;7$iA%4OSl? 
zc&PKcV8tQqs5eG6pwsj4pwVh@0#a=~;ETizsVI)3{*I|~2T^U84ezZ1Jr@?dI0cdU`4nu+0_; z{Z9{%x~*)G{;zQ73qKVGlqf?ST!R%ZAV5G9n3E%tXYjLKCYA^@2G7*ie9%G07?_Rk zuWEFEvY7u>QKij+IP6+tu~;)0nr@$_{`zb47w+7sh-d1leU-`v$&x-$<>l>|Y<&`S>D3yt;+!hw@7} zOMurnI1~Ykc>V(m%|QXwB6;~9$N3PyH|`0?xy!$rbUUrlDMTu!t9 z{ONWfCXT$M+5W{OO4{m>XVe=yc5{3U135iyq8LAWcbPAf3R9ttA9tN*;7}_s(uvE*Gf$f&t%7g`PW19SVEG6wCHiGpUIy9-dJ zfu7a}h-gGV0wA9$HFY*7est$eFFC*|fOHMOx+7W*RtaLm zJ5>HevD4`ZOY)}Y<(;kA8x`o$DS1844~PGJbB9EON$xio69gOdo?uU>T(+^1$r3D1 zGSiHyCoLBmo3hHNwf(Sd3~*DJw_~*0sIQLJ8bk=gn3tx@y@e`%sI&yUA^9XFBX6;{ z9=fdYb@^88jGygUZrO)F4N`tsqsxSAShG}6TKnMnpUFP38#={mUXXmq$`Oi*JBZrnc3kX1PJ8)*_+y z=UfgPDJdzCMz*`3X6sckKeqv=>8>%{DYUp!O-+sXdJ!CGnKc@$l0n1va09uakj-ti z{A*?+_L;@e4d#=?YUktG$h42Uv~950FW){BVoP4UXTnN*)zZA?bW-|!uI5# z4A(a(RV|FZt=htY`;uypSV~M@kH=FZ{jn2e+TL7quH@GW-_0#IA%FXF zIB!)hc|GWnB;yDK+Z?CE-)IXSJ3$e*lory?Ep;~^=MFbcoMf2YV1B}E(00p3vayU+ zEiCF6YSZf+C zGr?aTSrFB0?q<(?r6=Caq*SeB(^pxtAR~LE^t+FUla)%}h@nPI?ss3MlJVeIjr{lC zA{;lN(%6u+*mkRI>%=mZ>M&yMT$#7$p4%rA6DQeOS$D^2Y3%Z*o)(@&0s`6pf>p{` z2jVki+T+ngk)RI_u0AK@PYmVe%iJ$o9qyx)~)5^=gUYNjr< z-p^15EA#_nqU)d{2H#H?AH|^1oj815fv~SjN(4AK9%rY$w_DHY>^uTOhmgi>idS1* z^_Vu-UK?(B?)#^z*Fogke!IMtNBDg>#wnjV&fEc@3k?konF}C4Acdu4^tG_C@bh~H zE_(B^ZySIAwty=RXwojV1npy3SqDsIqy9j8Kw$ZkzO3~LVfizESh5xOrqq8gO_?(6 zptpvetmh|4H|^0QQHl|Y{41L;+4J0JzzCYs3wORkJ2fMt6_@~qhNMtX zQ1W*C3Z*E}sHuzN>V{(uuk4;VR?=s9O1jfh{WsAf z?ndjolY`N|8w6sMSR`FPfiRtz!^Uf4vy)xO5@&&-R_~IgFeXTftijQx&e5S#$~8&x z-8P-)AgTT&;SxvrFOo@rKGRZx;Hzd_^V>?jwr%yieMJmTaMU<%4J3clL3IK8K0-d1 zeZU{5FzXANjwJ2x?*jyi(`MmJj-M!!`D=)ynU~j0Zy2wqr&m_2A&!A2;plE1!{nt_ zHg(J|eA;}Y$geZ?vJ|zL5f(MyzDkR8aj{WTd%L?6Cy%s4wN{O>&;2Qsv|m^}=i>S| zL+NPnXUo(_Atxnip_U9Ljz|e8I)Sa-*N2Cj+waW4wo0!>J}YysQq||70#-UM0m8I0 zkQ8gnc#tBQUg#vh(?LX3>Ca9-l)qS@C7!wYFEjhT{wSl-ng|@Bn#zDyesl9dAkf5T z`BUTmfj^_gN&IMUQ0+%wawlH!%dWC3xUazx4Va=Y&v$!(+M4`wLo5k6<{S*8PB zFX%bE0MnD+&rEO~;@a1GlvOyd7Sw${_W5y&oK86NtnyM3YeQ5q=r5{^7o%8&0x# z7~5Y85CM)X?+rm1B}@B*xesu0N9&xfy_Dkx(LaMSn#S;Z9SKCI^7r=Vh~tc+G0T-D zqTkM2=m`BZjYDRalwJ1d$)#V+#W600vemv2A72Upz^fVKVy&r6gqX6;A68KsGBTsN za?M>j`+oq33q@WAL%{ge_iLgbA(??dZ6B;vI0$E?{u>SeMixAd*tUIXUH4qtP@V8yiJg{5!;7jzGpwR6h@UVu$DFdL!}J_*`YmiTTv0d+X($(PVk~ z$NOtu)5gIbNiK-4RDG*Zq?+5C-S5he`|n*Zi+9BbZ8sj zDW(UBtxs3pfAG=xU8dYrIXoaCXcXLD$BvC>l`g2uZYte9m3K2e{jQ^fLGGg+lj~k= ziI?jDo82s;1*h}fd!yCB>$&XsVG|`KCCXO5m*tCo>@DaF7pHSaZi{sX+yNXJ%nLQ> zjxl)|o;JI=odrU+LS%@24qnaSrbnpW<#$gxVpJO%V>Km?dxjyNnv&e(3e7~tZZ7CD zJK^~_1tJ+aS&4p~hJzx={75Fa@u0Q-j&S`|iBRS!+90}RE~xkcj9 zr3SDr!1UykCaLLkI;{$}w90ML$LrIB=mq0{EO-(&IXzYhs zsBEdIsAypD2lN#Kam4E&1;*dsA7mIcT1;=M&R>h}df&?L&)dJBbSC!gtI|V$_fGg1 z305!rJO`EGcm28yk?KYjgc)V3FCm~veD10x=xT4UJ{`|+H#=ErFdjh4Vl|GHrK*CE z0BG0%kg9RpE^Y7cFH~qZk$w#QU7>yJNr)0iSu9I?cC*W6qqz2y^c}oGu{`7cs`l+x z+~2^!d;(c26Bdl83grzDas>@7_vJC~r{hP45F6uZs&c6Jf6(`IK^OL8rMcw&2+=;TE~ zgA8hKRVZ8{Z7(Xng8wH>kpl0`#FMh4Np(I_`c(+c<;_wXgEf}}#*Eo+&Oh(@X7raf z8?}C~AHi_V4(FRWYIaXHBGEGg8|^;|`jr=&thxhSo&5x_h4mH79iY{}J*igZ#@dpB z3+d7Y1BhW1ZjTn(IXSPvDGPYQaUgJ#jK-+}0UDqfOs0ZjC~E`UQI$qbKKMOw9HYR) zD?Z&0bRs~W|58ZU8aRI1fcqwpCKV7#&ODY=w>SB5Z({2Er&DmhOBK$zKh3SIth}n) zoBKyctrw~d0kw2-aq(4(BLC(4Nf_~sMvIH~?2k!O&v9)AWR2_w>XD%z^<#NgHSbbr zN1a0`1#kCefr{kp={CsiMCY#Y!?^-1QgrL}pG)yC-DNqK%qftQP!Hv?5P6kFmSWT5 zm5|W^dy#{-3_mL0PjLw7ts_7WbUDU_u^C5;8>!qlAu||{=f)e+9%=pCS!Hhpxgh$H z`Fd2^Kg2B4?*51z$8%M&%-t>{7zfWi4O(&XTDpH_%%%~=uwXxBeiW!xYS-$=2wJoN zj~5#ruF>Xk4(w78xj^p(h{GHK&svEBTGkXnlkJ3adD{+y83P_d9>shiQ=T{Ysv8tBzX4oB#TIjEup zh|x9WUlh!rH`T;*eUrS;f3Me>O?=l|`a#K4aLKV`-Grud=#Cv>(XlBJ%rM@O1kd}^ zD`%zQ$MQG(Xg=A9cv0BYcQ(uIJ`zD)f9cgKm}*D|amr@@ibr9+MTCS8ihsH7eBrcT 
z^SRg_27xJ_MJTAoHsIVz3~+0`L* zOTsV2kF8*YYAvGVu=CrGx|H^(-&5)rdKpw?E!&OWF$VSYfm74cet~}~C|(YyEgA@D zv|b+9f9-`76qA_gUcvNSL&$G&2?D36G%l6Tc>@OEf+nZnQ zn1Oa@*ViAMcV!(M9Dq_lMddephR>xQ@HG>C@T#|6=3->D1tlt2asV8jW$t|3+}eUR zi6QG*j){v?0P6Mf#E5A9eLn;N*x|=8VbU%$_A^aAl0QuMropv|^9u7cn56b#$ucu2QCIJs1SJ@hj({FYv%+2lU?JWS7 zY4?+rX$y|C?ME{RDS3GS4NQ!Vdg}#w`D@DUs?`?D)zbQOjfK$r@!w z_N_jAaz{f;zs2cJA6lwVvl>Tbv$|w@uB+33iR+f`{R`GFLUWsz|4UF`MtOuUaRpL* z&QsT96`Ka_@2?L3Krb0gx-Kv&WFH!M(S-x*#s^@L0RWv?`_$XRk z4^tXkf=ppkWVGLlTzFXreC1aAQ|!;04L)ITb%j4xED-nYJ-2)Do|wcM`Ua) zmJbB88g#mTF>nQ2{we}FT?XJ(0qWBq%7r690t-q}&%Fsk((d!EL81Ho0-zWsw}z1t zfj?Om{QJVz@;-2Np59W{_%T`ZA$ z`DTA(p*U`02%G|&-Q{HE?r56gdzlA}@V8$tG%c>k@b{;02L%o6|j zIg_Pj3WV;&Mp;THCVX&tQymA2-9KEuO>#_eaznNLOKIiH3D^Ff_e>jri<0$Vjb#GVqU2P2XoLFm-n$N2rNe z@9GzQJ0P)`)qwH>AdMg-EokxW`BzV?h9z*5CjX?Tq$~uh0I*GuF*b6j4UY zN)~q}SXjF7^UD+skg@5FB}m}ocLqxCUE^!7q<9iK1~Ig^jdfc9<*E5;fj!xKzNPH$ zUTLMu#hL&>PF7fc{o;2>I) zVYgaGlhQ``U4^sZB}j+p>hP$muVqF!q&(anWD<;6h;ebXg~9Fp@gHxLl|udCBNY;o zcCuE;&SsG!P5>t&0`e_w-bsyJI^5U8K!|>n+l97Y$J*KZOaKS|ViR|Q}#$j3&v2k(Ks*D@gUy4bCK~?qBE{naMpWTT&aain~rKGT8ya*ho2CPvurIU-09;cLow3Qu$qPV53C^AU6#!sSG zqp8xv#E&U)GUZ}Z7P3?hIMdXR4u?-Ip;?V3j0-kCT`d6afYW7a1(||bs}5<*Mj7m< z5~UEjWD@Q?Q<=TzCs&~7l+UzqzpKR|Em4ahWX*S&BB_vq(Qc#}h^f0hGx+kNTVs>v z)g740YMdQTjYDYbe(`;5H0tS?dnfQsBQ7p&Wu?GqPlsD7|n)I zZRacb46}-4Qu`RS`Kq*2(b36Y4w=(ZvE2{VBv`L|Ypea*jgGZ;8mu=TeMZJ(fWGxD zGG{ElI!OWq#Qt@)X_V2O6R>LL2SSYuhS^SaaabygthR{XZ#Atam8sL;o{WPW>y1tE z8tD)@Dvf#?`!j=zJ29|E`P|(uF1I!}iJ*i=(a2O-sgd+e^!K;>A5Ea@sH}wDMAY=2Zzfm2_z9+qF$9lP;5m6v>^YMCK zAFLD^of}wV9tUAA&stlHlJiu31XH%hdHq_|G8H!5=L?ybBiffH4NbG6}5eWI~-;SU3ZHF&3k;`%%fuF20-<9jL{}nLA5>ho94; z%jLB4_R{#_%JFdFMh<< zw*WZNZxyR!t=WYdw`97S82k)Ws6w>m<;&3&+NY=H>qBP(0%=7dY6WSt@mfdct&Ruz zt*zeO9VPIQ?~N&y%{~q#XhAHrYYQ3%>S}zmoX!_WLn0Pp0)gmU7V$u+i4xKvHEU&K z98@e@gNV%OcPZB)ALe)esogfiXb81pQN6~yNtPir?_(6v_)OssV!!H$=-&!y`Gz<= zZnBNmsgDzcN;WprUO&Mpq7w`Lk>eS0VzjU7RYY5xJ*_{_45k%acB4%$Otimz0sBs0 z%Md-%QBQ9TlrlFbtKNXl0@277YBGP-dX|NI+WTymo4AQNK9VJ)TWgd>&<;%qx}U{; zDv$)QWf`|ge(f?>nk?5-hJAXIg`mm8nXmOOOh{AoK0j%$pRrbOJ)YfdZS&&6E!>lR z+VGiMS~M#zioMRJQlZ;;yxPuNgILy~WYo*cTdTK#4RQDACu9}f&GEefLAZM%tkKM! z7a9N=1l81tBf%)h#VaZ-q@a5IoC03oe{tgO%39}!Uf3%xw{UQ%9_SaVoh znu*8YjXmCZ`uPE{yF4wyaB3nevc?$Ew6^x>r(4D5h3R?Ix|QHlJt~9-7A(q~+rei2 zm(DhIX5R)Yaa|l<-rxJvZ`!>!+`O?0ySj>9*03&J8wfeH=a_Vv`w9vgol%% zel#gf`1$z;5^RZ1n(cHCO{VE0TcJku-uS5XbdQvhKPwDuZ4fMPxkg^O`Xognq})~@ zlyt?w`j?#3jqc>1Yv;$yn|)%_p~$@8N+7K?(D+s(i89z5W=bw;_3PI`gLSf8`Zv&c zJ34K?4=kZ+@9g~haB2&tI`I1Z#DcH@WW{Gjt!n4J-M7R~U!EwJJ9HD$KD`|mfFR(f zGR6T0eK-j9s>mig)vKe@hl65)W&&v896_LT@|&gGNnyOm{%{Q;tQ4R233yt@GSlo2 zkfHnmEdqXI8Vn7Jz;O7jp#+dUu475bNJV7^hz@G`%qgpe!hWm*%caPEZN{tGwwn2z z^2=v}Mz5`v7Du@#8~S-mF`_U`V(XDr{1TE66dXsY947hZjkX+eU;Y`6KDeX1oviR$ z&y_d#rq?2URN@$QwDL}!8d6y6QToa!C%>|;O-`_aN;ePh9Vyy0N${c8vilhtPPJ!n z%i1xF0i-4Y7+K%vydPXJ`ZMjImo;Ha<3$G^LEx^!mPyRX`SNgM3$n43G059;{rNnV z`8=xU2E)+t4Aj;AdP6fWGM~n9OEr+d4%cY3$fBshiNjOnaVdF3F zStJMy!i|BVC$$#3S-)aq%2N;HMa4r4d}4mLi>bFn&lAIRq_3r^)ab4mtnj`kjt zz_|+K%1Rx}L&DZpXCk5%Km&w;880h55NGEb6Ir7eH@n-4P4-d6^5ac48=8&#V8F?* z3a!HEhy=uZNQf)1-<_Qk0-*(;(6_pSYLAyO$t5tLx(a1BlQ3H3@Pno_6EkIt3#aSM z55>sFSVZfPKZar1E#xlL6-#kI_)mueEA0o}es-jh@j@I}XkiK1{#wdZTz-C#lBQ;5 zHNsB34%Gb5h`lyhwT!{4)ted06D-ykZ%~MYg(0SqckY!(hEO$CnFMbW$0z4yvu|`A zE@>p)xd)`~smhI2=S+Ay_vTAVDmkrwdterOxXBs5lf=QqbiFy!EST;)_n6-7=W@IG z)ueaG=@>hFHdhK8R4JVLB4xVw4@o45bpS^xKGDbbG~W?ya!*CYDQo92gJ=*AG7y80 z&8){D=2M%GUm#{Ik5U!! 
zm-`#nE}V-?;#AgV0>lq^qR(`gajag3(-ExSzfZk;*T$;=p-2M;Whf{#>C6&(dN`Oe zBrO!hP%)OelMCK_Eq@~QCmkFOu{xZY(w9?QYL}yp7oBB z`j^7)uz>G8v7d#{70uJlF&yY0+&2Qht20ZE7L0B0iYhBB`}jNpWEt42kGHo~(%+Dg zk$D^ju*Sy3>{QebQHh2d;*9<=GyTch);=rrEQJ^A)Xoo|CmsdjT5Z>JWq4S5@8pf7 z9Y*a;;J;&5XvJTi-K6;*^GWoSMTAj#)wo&og}<+nd|tfRGVk59^+7})#6iZXZZG2& z^_RKZ@4_KRpv82!+)l_6im}9-^Vr1JPpXOCp`Sqp9kp(|mn)EI$Y%@S;^EzUP3SHC zmQD%5ez=%AOppp5jHad8?J>aFJ{;BZgAWYJc%mF-A249aEuQt)h>jVct`DDjqo_q? z#Om|XUgYcDJ6$kCuin?{HVle7NiiJ1M^+&c{~ewwE2Xe^u*}SjNbE)o7Pm{E&c-qqa+@=wQPBrVklq-GsGerEJ_=eDEO7NDP z8>NurufB*QE2bIMI&b4vlO8&j6h=5Jggxsiw6QS8P!haljP|adv&$hf`hdc>tA1?d z(ko7sbxfDnvKmN;TU`#E{{E)@^5un_GgyNHike!GKVy?8_h8TY?EbC>g=nhId@uD` z^HMBCwMZ_T=lfC8+M14^AX}O+8S5y<(0HN1hvq$1p}f4JDaFKYh}+Yso8!dsoT_Zl z*T)GtVIY*NT!1^T#f8>T7-BN0*|8p}A_f&)9C0zp#|wG;b8=rc0)JYoXNf6)Hvpwk zB*?hIo-6H5$|~L&AL#$|_3Qr0>UBzrqx)X8#L#HLh{tp=l>k`N;ES3X%v-Srr6-Ti zzq0^nI9V$6w(E^F^YQe;`9ts#0L9^Qo988fa9V%<)L6^X{PeqQ-eIv8=(Uh_YGFcA znC%n1*SDb{t}iEVj0Z3(H7`GB@dem_?-7C5ZS&fCJV+Ee(y}Fi%qllmEvhErGTRC0 zdAI@EFvdR-P+xep;FsQfC+GgpY z(Y^W$eaXW%hdbF6w~+BGo$wEcG&Bq=;rq5@c8M6PWAEFm6JRt_NaMS{riYa=jwSlg z0ur}C;Y82C;PW|BJFg40sMPXOhzP;BUR^9K)+sH{=7Vi*<~quXsKy-mvt^hm_)WHn zaX5zVTcIV7+boK6?y`Fcyq!jNwBYSxJ=mce(odvnq%j~Lz zyHz2GdHFuh!hoM)g_QI;2iZDB|EI%75Q-pbgqXcsWHn&p`CP3;MS%#{I#_RG{tJL4 zeBy{&7Bm*;^_SN=6@z1HrGev76<^w7955a2vf;96ce;OS!mogSHU7>ndIJItDOpDP3UkW{LB>+E7c= z)0iS~I2f3($+Xq5N}*=L@5lxYYPSsZybx-y;gbn{0m5 zJd9^xg2r*QBWp;i7yJOOhe@t~S^WIU?_xnjTSyqb`-w_4zIke+gX7@fRIcUcbPMkZ=08=3I7=Zip_ z9W4aD7WEl?>_}gws+A)oV;PW8xUmqubYX$WpoHeBNG_7}iUZNW+-!0k$927BIvh%g z?eezu91Tz9{)l!1n{2v@qER-^N2zej&Az~#FKWoRM)oWukx*$!{Jf6S({I3`wR0p- zr&dA}nNK54fu>pW^v6?>k#XGX9gBEGqA$z9-Q68vO!Yhc+Jl060k#TP}$y<`vAsp=Ok4gB!+sYaO&6CABu@F(i zT0plv6i*8AF_J#IX@Hta#~d`2W@F!`0j~UtO8!teaMjEE)8h&>&P-#r%W8hlZ4~?& zKh$Cl)^ty|KC76Psr=OCZ|7}sZgmUwy2iPz0R_p+9kul@Do@eCZ>HVL8HZEdipB4Y zPwXs~=M>AZE=|1sK`zt7Ka9HpQ?_q>s#|-5Ypv9D)9XwLf8G&Z?S6~E2~v*ee31Y3 zJpxQfvGn3QJ{||tNvxLV4D2#&Z5@{zz0}n^Y#Oe{xznlSqQQp`O3YG>$EUQlc2_&Q zHiz@r*vE^VW>(f(01}p#l6iY~7y^Hoo!8|CB&5w?Y^aeTE^Fxhby}tFwFG&+WkISm zacej$tb&3aP!ofY41gX9`AG5eo`Z*BtZLKyAfd>Tf?_fD4McPxx4MshRqBm+t|FjzcsMQm{k#6@fpfW$RV{ttyz%3&T$RYI%p>i32V(w4c~IM|W&f!1!qk6j zI2^C0^I>GD!+;!iF<)v{$-O&0IA*Evy?t>g0Z;j0W zBi7Ql^hG!KFa*l1y_Fr|tLwAf(~g3}@(09g0D9`{ z=uXxCHCB`@uEb|8M6Gn{F1FujN%npDI}kwB8`W<|civXGqhmy+K<%+ho84R{@OGpl zv{+6lyFj|Ndyh@!`me*TYUa*WSMVq`-%hdVkF|M$P5QG{cV}e7)#AJ7tE0V^CF>Trv_C!r% zYdHX4eo#7(i!!j%2!n3KIa(Xa1<_;<>*FSY!jpBXA5UhsHsfhnNX*v(~s zYD2f2A=sJ=2g%UTu%n{`K(HR;`2mUv9zpqe09yxkmwL5Oa0CKJILAxU=ni16VxptV zKNrUWt*=%E)xE#4hpmSBZs48ktOW;%^9?8BrQ`0bc6+KGIDX)cK2nH+Ks17cKqW!q zb+0ad9z87lng4+{^i z51BlUKyH{;tdKQh!GR%KFGUf(wyP1k=Mjko#>e@BbSiMF_mB#W{qq~X&&pw5u|C{> z_8F%b@Q?o%`mH~4;#AvxYuC0x>#1^{3TF!un~4Ju_JFkmSO$4EY|jG&-cqA2J}&NJ zmA()^KmUcs+h7Dryxi{wwh<8#K(;_IdV$m0(lS?~v~?gq`1trJ0xj4Pf97y@+v)x9 zj=jh!Bptg*Mp3}qX0+#t{@ML%J53$K_Tjqs^6~NR5}3{bJ*h@aOni8cA8tXCzoGjc zIfXLEMHm$xQ;{R-rYE&V_h0F()nLq>&+44+bRGA+^?So9sKI19!r&<{G~9i5;>QN7 zxy~>yyEQCyEG#wFADEYCGXVYK=i#wg`I!(7+v9y`NrDP*k5c6ZZ*6(3Q**pKo00O= z=yP)WYcI~G$!KYh7uV8>Q&h!Kll&25G+yhiBfr`h?wVGH&qi0Ch6t+)Ds33*fJUJ* zaM$kw{uhwG+;Ek^(FFsQXA~K_XG`^7bedc;TgdtAL<1t8DTX8`&;F=ap49l!!68I;Y8AOjF6|J3G@#e!8- zf~jSw8s5Xhl#zRRGxe9_2L=X#_33kSv`G3fEY?DJct_eC#eV%qgRRXmk?HsE)KuRo zR_kgSvN#+!K|NZjNHYefHbkugOyj`60g`;GfRPtq%k?8!OAFy0|Den!nXC+?xE zlm0tozU}LSGl78!5H9(a4O?f&QWgeWzq-vnX{L)Ho5>}{hTa%xd8(QhhrMo(z_`TZ zww0CA&&BB(`fnK@tN9E6l!}n-{|f=N?)st zNB|G7X1zxZ0Nji0JF|e>Gh7ZHCXf@qsHwTQExPLOtrbDTSu`k2c27<^{GPlbJPG$x ztF4TTHiO^l4-D!%J2tNt$E{7G=;(+6(EsG*pJo{PWZ$9B)nsF%JZLTA1w0JKMvUA~ 
zu6TU4j!#dy-Hxh(bb#Aw+=PT8Kv)cVV>~e_={Feu286jFj|@w1qS+`#2du^Wy_~JL zZ%aW?!ti+e#&!}WD#W{Y2KI3}x|Ff8xoUM>T$x#fJbZ!eAEXMmdE0O=E=cGRFD~LG z$mcBjJUvy+ifH+lOQykaot4g0F{L$Ryx>`oSZKGZ1qv&_pr!P1Sf~K7Aho+e5(Nkf zczp>#Y1MAxt$1{nl$2CnZc{NApO63mqs21y7ZB$#Wkt*?OFD@U>0cy50ac1&k$$D$ zW`_eSOJ%V5LY6iyoi;WxJX~irn~sdfQLfvXq9V=Dvv7BRFa46}YCnZ3DF*$&Cq_25 zgMM{$+bpuhNElydjtgnNj)mRg$=+kd;**qO^+a5o&4&~cwNa&Duw+Dw2o6bKqbi1f zj|>C#9yz#4f-g8M%-qxzY*$37Xn~$(IPfQ;Z$f2v{BC(1H(@3(0Z9@3VwR4Y@wzKs z9}@i=WzVaq2?!>@NJZ3DVXy~b$nfAz4?=-l;|(H~Y_`B1I0O2q!1oJ#dD`9k%z~>fPBb=f^6H`jY8N@P{0eu(=$E?MT>F& zx{Jjq+r7HxZHY>0Xk8tcQC8{q1WW9?#w;ap-y*z`qL@@ff)HbE+YNg-;viE>&@S=wU8sSM zmM~kWzIsR4!lYF%^-b*~SSo$LglW3J0Bu1i@LYgEl~)y(GG$j{nM#Su*L@4K34%@D zl4=4z7~3UbdHL*$3dVB|u)sDgNs@%Y1P0Ir&4FkYphCJL2Jtb#BieYh%!RY^MLOEivQB;eCzslYdbA!onK)a~!D4kQYuySq1lL~hr_sD`oEBD|T7Yqkui z2R+@~zIz9PSBA_MQVB(tAjPEHvVC~i>Un8qC?hXF0a}oyddo|w>73UMDU3?8ijE0a zM*=LZ(%^oNI@WD=qKx+!di^t!`~VX8KyZ13u?DXrB|&KU!Wy`bOwG;F>A?;3x}TvS z{ME{IK}Ta~xbdqM%n1ZibUhyP(L81xq32JbeQoa<#^teKz@;e0fvN`sS{INFqjux)yaSota|L0>8C(;0 zcc=D3QK(v5+n+2=z5{w(9>X)@zc;#PFT)}_)Zf3;dfE{c7gwcHs$2u}nvwzxqKJF; zEV%$9YHk)crDBr*8gDQjP%l9qO>FQ0IHKA40*1LXTWFR&hLxbG{QVc=zL<$ME%`2?P88-pd)8?OV2&_ZS#1pdtclCFw$C=RhC<$^mTg z8^QCOchDjK2~QF}yWIv?-=_S&CS zjRvFh=|&q=3;!|lm5FK^OxxQ$J9G4&x3g_yNDOM>(*PD>+csoejGLKqOQ4>N3Rqh{c- zcwD6eS%bqe#^BaE7)M46G_Y)JJ?-r$24FJ^y!2Zr<_;94!Vh-krhCJHwLH4adiqbu zZxwK*?D#`2H9OBvHvA0!6NT1Ss8^*RE&U2lg4-AY5fNyLr1S`&0u(E~f)z9$;Z-gS|UvKL~2JdV3PbOqrd)rE~v;xF> zG!lZb0nS2$%v@tdVptfQoOg4JSG~nXn_*Co0S({ZoG;%wj;|k1BkfiRcATnysIEqu zMuD|j?Nper$qxKBC^ZYC%ggP?lEuV+iuf_&Z(aE6y&BjU#`>qHKa!na+`1pGE`ya; z!y6WXZkv@UFp>El4z5)>350VL>zp+EG%Hs}FM7qs5+URLL=fV{t2YRsqZyi=n|pl@ zy;a*Ol80cJrA!ctP&|^nI+=s5UbYMSr_QTEl!bh>Cr;?cM^POF1jXZ}N?=`ZknHKS znkC`po~)&S2Xi0~fIgUz#XuN@K!r$?FffcozF`JcGVphAzo9^-qv-pk*!_eI@n>s` z9jF$8r@|KulL57XfbTuWq_cweJC;yrIFrvs3!x#n07XiYKG**;tX8EqvhxV6CnmL< zcl60^$`Rwln;^gI>7mMGp}J=vM$Y{-(#W_FSY2>oJDyvFrHqYx*IOvHeku~;mcHC* z|6iTNSIQr`Bt;_7pW}$`=~Nf=+6my0rmm06t}p5RLcpYLN~%aey%F%$S6i#Mw||IZ z1x70(&XKk5fE+e)bkwe*B4z8j5+K7VQ;9%+%Ss9(O=)jF$gKP0hgR}xHR*D9UT_*L zkV$}19e^(Ac+q>6N$h>5SEu{(sAOy08%I$32Fy?}MWbYhY=rT@*H9M>^Z#`cORE3rB*?cDGZ{djC@+vCNr$jX>@d8Ah)+tY z2fGMBitFv(+_bb2hD6>v7U@){h~NkXO(Z%~m}DGL46)F2L7X5Is?d{RdTQAHm7~Q> zA1`UZF3%rNaQNL{JC~@A9#511wNwaFKDe8feAlxM5V{WnE2)h1PADu$7+Cpg6`5eo zfYk1{_{YZ|eyF=I*MDMqG||*(*D|y|Y<9JtPFc|jj-MY(vy#~h+gaDt#PFYgyo;M; zvr5uUrn3t}kqLnjKrx??vYKkKOYTzd61(`@aXUV z#8YV@V??%^8xZwJ@{?TQa)a~VrP+gxkJ2oNlf6JHv$7(P!o=9}6_@=t7gsh=NFwT? 
zM;mrQcsDG4b_j!i;d66wa=Aj)0dA)7!+Cc?t}(qbanVqm-Xz+_%1WXPZfwm0dwn=_ z^SHO@Hc_~F3dFz|n(4yVg=01AM8EtC%-jOubCHPLDWiXRIOSEZGE4dwU)3wcHY{%c4o=qxdxgy-9|g`xLn*2`WMLhA$6|U|{{)8d1tes(zz##XoIr zgFPxtgQ9Vv@txBB#?g8_0vH4FUq!=$amGEN{B_>N#`0Uk8sS%79A?o^;qTw206XUJq03PIYRG>)dLzg4 z6?&&@nE+o4D#Qe!(qB~#dx#Nrb-?Qn+`2fsj_)vl-!CUqR(tp0^yMcyn7{;%1J@Iq ze`gS#mGV0Q%zKi3Z8q=Zy4pCP&2d^nQ7s0O$332W_gX%sReo!Q)vC?(Rzoy|L zQrKNudFjDaHtgV_Gg&5RQeYK;eGi1d2SU4M&^83r+_2a|sAej&9tJLIB@k3Uv zaG9UPy?m%}UlL1G*Y#kRFbZ;O;=sk5mcc`|jhMDT3G`6XO3yW%0Uj@_flM>9TPk%C z{GT{{;zU6_4~WA=Vs6mZ4LYs3(70--9;KJlXmEPGst{-VfsMp z{(QeZCNrL&UpxoxESQS27%fh8ygsHVypk=}>Z%PsJ82cqQAy;bFH4(#W*aRL@}IaT zh!q_9Du$+ctn}qK30rH(o8!-wRAo)f8&oYmH0rT8fxZfFxPO*Y4%{X|KI6^9i}}5F`Bw zcYKzXw!1Z?Wa1ip(eUK%(7INS&=1{P9I1j_)d0G4YgY!h=h)qNGT9m5Eyec>l&O55 zI|lrob1gwW6rE$*|I{`FI@v%)5h$zUkY)kkhDr0uK*B0Uj0YH^IvZ$`YfJytBQWE^*YQCvmPt`GVN|0r?>B{f4!BQMpAwY9sXp)p5X90n;7vg@G#%wJvbhd$B zpE&#Fi%VQSFHuajcH$q8p8n#OU=|Z%Ew|w{Cf^ENOkeNAa8Sr6al&VKYY7(HLoWVc z;80XN7aIoR((J5n8IF@_eZw322gZsfa~+@ankN-yGaXkwC34|i@VsTqXdSf@e^#M4 zK3)u0XDKAuzP?dGQ-hR+{>Mb`8Iu-pQFe6rns5zQ>HU~32O)fZaVqvl=kl;-_pPG+mGYl&kaED46z^Wc&^Y#mRNJZ+$=4-#M&rw0 z`V_XT6&xpjgr`NaG{}ancv*Zd-;5DC%6L;6;@YPN$H)|oH$|_UCNxL8GN_j$Zq)hL zq#EfRXlYHfw94DCL=Qk~Fyfa|M(C`*Vrr|smY+}~0 zl#Md6b|%g!&Bxk^7J4JSXhF&8ov^l960VF3f%sYX*MzIwdmCA;n%Ar6S@raSC!6bE zdc=H#_R+U$aTf}#RT|S<$4VMBejZcMbtuwa)*Rhf+E`1NI4Yf2fZ0Q31lgb1W$kdk%K z`Gez>#Zl{-xpy1_%M*=qwqz0BlXX5indy?1uVx^?z;Cfc1u4qYsH-ddmP$pju_;1J zJfk!H5Nf6V*i1eJlxkkO>y+*WjBlsWa0iitTjhnvP5BuizPH#9WfG70WjMQy=tiTa z3)}z#B%bs>qpC|a*M(wly_Zk##eM8mKR~k~O$oght#CzY=3wu7<)Q(oB)dm>4!XMOw!AKMGKm%()d9^8jJ?r*Fqi$uXD+n>)^bIBlE# zppzbX5E1VPS<`6+-wp;wdB|KJnVb0k%4rx!i-*)4Pr>0CHq&De(OK?Fi!6CCf9@8X6MBYUA-DxQ=_f2k09l` ztZZWth(|KETth>4@_cxohY10JPaqS4klk}nmzf)xTPmGN;YOjnwv@kqtp*9kZLB_= zLa~Nwug%TPbfi*+C=62WeLI%X72B{R3W-Ff*k%vR0a)cnBB68f_tMf+dFLah1nqZp zAF*qT$Lovg$(^ZJX3*t{>Rj^4bbexsyNJkbB&7so18C~w=Tm7V@wmKP7J6CGW;kvL zNy0p6GdI=t=3x7p87AaoZQs

4HogZh(~O%Ox$LNzEmnl#Kh7WP*JN##EdCo6mqh sd2AVwWq(fgr?h8(h8etR?;HHdT{bk%JmY90d6)?fw2p~(xt3k%zYW8*cK`qY literal 0 HcmV?d00001 diff --git a/images/Distributed-Replicated-Volume.png b/images/Distributed-Replicated-Volume.png new file mode 100644 index 0000000000000000000000000000000000000000..22daecdb90394f2f147d08813f479efe3455a819 GIT binary patch literal 62929 zcmXtf1y~gC_dOsj-6$d55+dE*y>xdY-6BY*bc56aE8UHf3WC5QwbIhNq^z|6;q&|c zAD(%Dnc1D0_r34A=bn2eK}Sm&ANLt93JMCos)~Xh3JO{l3JU56Y)oJeQO9W~@C)5Z zR#O%Qr8({Coec)?KdP^uvK&goG|d6<&kH*hJxvsp5LOhFchM*)H^8oU`zR>>d?+Y~ z)+i`qc_=89Z;Ez+g!9_HR*Zid!pN$AG z6b9+Azt+ZMG5!t5#6idYaJy#T`d88@aPcIqF^4<2vXE-l!=$bD5tT>HqJRfn-4-JmZPZ63o+D1?{FoeB|dX08nVP- zDGx)H%R7KQ8B%lbT2w|wXRPWc(fy}^K;@oPv284?ncXW~k-7;n&2fp~E9r+QF0$!| zqUYpKSU$J)Vs0}6;S01G&C7-|=q(@VF0x79mhE6cJCw1%_rZ>c&A2s~V*0_#xnAfb zFt>)K8~bHTTAzD;;1JR<6@dZX>(!%0wKH6{Tc$)p282H@y?#a6h9pX)WmA@aTm$=w*mq6c( zC1pYuWpPBuY_ni&fxEOv@wxA2-5O&XemJ)o>5^eEDD{S)=k{8(8p&U#BNO1V)+Q9S z#|d+mE9$pHtv{gm_H3sDrxj!Q$_=U}#$Dh2Xwp57tZK37&{G}v`6?6`(^Z&B671`U z(I3#v*F-M9A`Qk|Io|{4bmJ`Ha?6Tkk|g|hQty+O8(g1v9}jS5dk+y$0DtL5?lsY> z{@-zuZ^x7JpGARZN`8JpwpjM7<00GovyZjffE(|B*HB>Oz|MIxE;KFC;;^;r#xNsO~YxbDqIM$UEh(5WO8X#3w5FI2%un84Rzr~RNa4FA-sL)APSNaOrnjE z@_P^IUTgclVXxADZjw+NG?!hP7$x_6*r?K`Q7Nz5rc}3}?2T#psel((Rk(dYp#fK| zEgY)(oUHj!Lh|;@jae;R-o-G>W;uz>C1D7urf^*gzht;n^=yQMDERJ7SSB2$+ z?&mehAju2&Mu-?>H}Ps922NTV*YQiBg9BeKbm@29_8Ux(n{ ze)Tx~&=)+5OrI{Z2d#TVxBI&cGj~~n@!BMoS2fZ5HNbDGZRoG>b=zy6 zYlPj#r$7qkfyL2S*|La9|5EFo$aFMajw59q=yT+0ADtqJ7N~D@bb{5{G_HP}6 zPepkaYO%+{fEWCFb&gLx|5_MwMfVf0!)i(Sm zyZ6PLv5gt&{S_SU?jbW(9{vkq()L67-WO)cF+kXB5%H}!Jn?q(61Fg?cjW%`A7k_- zWweSQbH@>x^$-I~3eR=DN!h%U(GRAFl628t_T1NQmp=&A&Mnk1k;4)pvBVY0^+%;O zIr;t8PA6f?fqR8F53fcXrp&^DH4a|X|7Lx3dPrZbL>;k?n{wCINHP#` z)@a4iv4l_L!(J#fFi`67467=1N#o};Qdkl?l5sdnpwno}bE@tRz8q&KTtUAWrNCcI zUE*cf!8Oc2u4Y}be5pzXD_D7}lFVy}d)wSDxp~q$kH11f&2F@v2#-O;DYh2oL-AL8ylE(CPIr^=rDx|N=X zrHCNEj36D|!@ox(9e>@hwbHNTbQrn|gxY--;*!5@5(=b4RDhc>45L2!PYMvr!}N?X z9C@2GB61K+bVD47xVHYLxt2h>GY)th5}$#aTYe>tJk$oW!eT&hQ2IUA*Ac1WlM9tU zjb+FfKk0;)KvZc|xI`8(-*Z2j+PamM+{|2k*_vLeAmAE^r>f%O`k}SO{o+gd)++3AoK4yT zZVdl}PU?1UoyfzmMY;dOYXt#U>S{EHUc<5dZ`rS(!Jle}h6J|qGS@q~lv>=%LFclO z|78GvJF8DmN@TYgf^X+$cj?Rxi<#;X-{INupt0qjyQw*j8dinkH!HvR zSD?X*D+@zTg?9BGEa3?EYN>-ICvdT3SiOgXpPLhxTBP%KYF0o~25G%L&(PXLcYlJ* zjs8+-`2eAk7;^K-MeUZ~rS#x01JiB#^~2M-qqLZ6|B~~!#-{r~&qaREjQZZt zAl%;!kS3OwXADU}u4F@@ZC4u=?e4$kM`ATK-Iv-d-dX{fOo37L{UHR`QKNLEt(G*O z--G1#Sn|1ME7k~!Yx+~8U%6L!aLI`Uf8Hc}Xw(_a$k+5v=k2Ai08N-#_wC=bW*a;G zfyQR$Cyp9kzvq&sZS{t)Y7l1`r4R^(6`>i?-=`2rNLaB8s#%B7x09s*GNXy@*dJB% zyA)o&+A;!7-wJ1MzYz?oFOhju`Ad7>%gE$TOP$t!&kjl|^W_C`>g#oNnTt6Ud!wH+oW`bGkJ>TnjIr5>)Z@=QRrj zy5b-Glel0dlsM1wBT(Z1^6^V4{f)4;)V-;37=Od+X?q7&@cF@F_rJ1;$^Um`$aFEi~Elpk&cVI3zU0-4UfCs7LlP^2N< z!=(PTl;(dKeoQd!?vlF0V^u3!AY`F!^yim*n|tTXxt}6B<&+8AbA$r>>}yF>Q{VKi8h}7$ z5YEJM9TO8(5QrQE0tpHUZE>7$1a!~(7jJFZa!(HJ_}EZkN|h3pu8+(7Ly;;?i;P6Q zxw&!naI32BjT;!S#Kgqp!-IPBL3>rzflI%gu^5#JG5vkuoFtC+J>S_ER&w}Z`TRq_ z{a+@-LNi(2*9IzeJN4ZHqR|Qh+!K?NwG9nQi~h*fKr2@_Hy3wz`@{L#8c3o`K^{24 z!Q|@xZ8Q<^S2_D1B(faBDTkeynE0mKJc}$>Aw?ym{|PoK*k$W7#KqGSTwkA(no1B7 z5^{PShW=jmz+81(MwYYV;o*H` z#|68j@hOQFZbrx%qrd-B z`mJqnds&@%xC#u8J%CpG!kfo^W@NF-Ahg}b)MP)uJJRbJePLrG% zu#0Yz-w^77_0c?56e>4#`2tc zBHUaRW{k*egH~8}PPlWlK;;ze0W+(GJ?Rg+ZgwZ((VR^|T>~ClGftip=g_g*`A3}X2c(k_dyP&maL!1gI+uN>FJW)+}_Fy$wtLSIutk; zq%I!eb122fA9$x&&z#)Rnhemoag?W82sfm*%n68zi#vOGWVck1BrrE(+t^SiwD*y} zqG~^8Nbjd}d8ea8Hm7!*QQL3tZfAm-$k(i22(~TA2z#gb9zq$DK@%XZM57nuTIg@gv^T`8cnFJEf*85 znqu>0C-+kjsF^!yYm3pm0jKMiXMm9K_QiD|LJmL*)z~~G6e5ltO3hc+(yiq9O>zw4 
z%}9A$cE#A%h5QxuF#+TsiyN)-qT@_BZXEqv5vL+=%vVm+=y2W9)1hY!=n%-<;^Ll& zpFFK(?xdnoATO@kyp*CYpT?7)iT-l0HGMl`B4UfR2iNI+lTW2PVKFv&c>s!1*$*Zg zfeg#s#DKv4^Y`!GZ3YlHtJsscU=Ob7Y@d(Lou*m(@L3LyIUID&5Ujbu4x$nx)EI5f5OPmc*F?1Oh<; zW~nTD;k|c?XJ_H@@k@8MSMlHjEji!G79Fw}V24wPgxD@I+k?-(lqU2P>n=XtPu2zt zitw>xM`Bl(b6$l@_0llNyc^$6Em#r4Bh}RBZ!VsI9CF^h9ZasxQem#h&^^(~p*YYe zG@fpY#l3Y37u7$SJnceY&@)Q>pl2w>nI^{tI}3v zjRa06@0t5lr`(P`TriHqrU0JLpJP`sg~yf5Rpfi6mxLzxp8 zLVA0p z68}(})2Wcb5qi?!DQSV{7Z|whbAXN}JC6D%NTRK?vkYSTow2A1x&(#3U*BV+!Xv>( zFY1~EQdO8#C~?~4j+`#J0q6@K-1ytWgA`_;<(-r?*u}blBFDkDfTE}22^!as4SG3v zRgz9vY2;8vQP+J3M^P380wUnxM76>Clh>sg<_kPg{Xgb>`X`!4w=)){jRodwP52 zlWfqYZE2b=)Re~b2`rfP_4MSFmE$tA!rs`=;fAkS{(2>-B_MHg;L}aThb8Bo{{A*h zir&GY3LWkJ`fXWil}+A$j}o7Anmd{?IDy9qvfW}KcNHz) zr@Gk?Uo0YSMYt}tW7LiE1}ZteY>9lAKN9nDWzimJPr)i#pcTUW{fcW^yl%!W&CXTRhK-PB_majGcnO6KQDGOd7ld+XJJ8$KzLjadu0KMeN1zyTm`YQ;$v@hCdH4Nv$`t6yBE$&B-l< z=+ypp`9-==XSwT#&t+LEvGUs5+6sro&Q97O5HJ@kEG$6c9fw>ch`!bNyj(c)F4Zt#MJ^MHQ#NKU}Mr>9)P$2>I8FpO2AZ9h-p&{IRA_1v3d) zwZzIGe+^q&HIVNp$RXLu>cgbX)rpIZ-+d? zzAKQ4UmB&xj^b(f9pBhP@B{ zEUJ5}pQDqJlcPo(_+l0h0f2_~MaJOxn@Zd)wvu@M*}NB{{u3NxNfMW8Cft}^1^2%} z5@nV9sC`nriganMymJkeT_O{+06t};+bm+CXSxNm73oo&5tdXL9idvGkdZ6;j{En` z(fVsF>OMXB^wQUN^&c#4aG)(cqOWIE>u>tIGmzrr;EQS1^ExQWi)K35m**FJ8fPEeEF{sf-}utHz{wzWfUvB7Xf}jOuzJumYH@?yPxL zb43Obz4jRBinTAudD28ZU1M%mF zWI0=9D*k=%xJXthPhBy^)85A-%GlQg{`6_mB!2ti7D!P|m={$_REoe@5|4!#erAF_iyi9hj4@RZlgw#DfEbgZ0PtZ}Osk z%+Q?x#qfZ+O4a?BMg>N)Kv~xEo`?dxnbiBECE1wAYclbh@xh%?Gz;=c zaI9=1yF#e#&Lm~$yDN8Es5x=B``hS4(zVX@I@gjQU$cb<{|>5>oKB4tMru0GPFT7) zHy4-Pcq+oVfNqf(Z=5Z_&u?a71rs+bH=6-{n%#+yY6KAg{rU4iqiXzlG2XbUp^;H! z(-CUfZ7G^mV{XYB9kD)3qmSFIFWjS+#27n{o-N?1Qiw*txjmp@IPB>X5y5wCA5=sDnUkZ;=R;yjWE? zx971oBG1-ynrfa4Pk+PUh~9DbdLElVEjr5cv92!p;o)saNKZ3kN8HiEB1}10QB-nO z5@-9fM~av54*8CTtMkX2nx*u>#}#4TYUUhlmh-W-w{)d|Q^z2bKsroM)Mp-d%A3Zt z^o>FkBN?*9a`ZWuS!?*RZ=Z)7vnJ7poh8FU7Agp2C=dSvX8n2j_O7^8 zkyvGp`v(*Yi&g&81oKf^rJn|UmR8@9rzf^w1p~{ooE#h+va)k^4a}2ah#Z0{1xD(2 zAkc7{?3~2g+uNNRNTLQxjGv@O%ICOSp^B!-z1yk_)RTkN&#=E?kI5wZUcuBNVCsI!etw~sxk<6Hm}+Y3P28lNlZd4n2xqnrGs(A_3TxVl zdKfHkaM9c#P;}@yTgneADu2JXoYsmHN~UNmZHa5Rqpf0uTo_<2cBQA)*eUTgAj~JG z7l#G`0ih<{ko*SES|r0askeh^<>gy79zDmla`)&Vp&`r7cFI|H`##LawQI3oo(0F)W~2b&1L9e_PtMnlPP*DSzM?( zIIzUg_+ZnA{~?BT?7VdB?dr-KCX_Bpc`n=%fG|aQtSQ0TrCQ3$Sd9ZWu9l7R66P-{ z*-FC%P`n23g#dahDrsqALOo6w%8k<4caB?JTzox%PNHg#Yd9G?oI^pTsL0D1_c9db z>`PZ1b7A|YEOSdF4c8dCKGecnQT|Sn&&DRw!;Vm$vB^VqR%6P`%V%7VWuu#vF9ksrHC^$wP_V#^w2Zz!AU(~% zV9zroH6&OpEiJXo%s#bFy%cVKb#NKN@$w~juu-}}^XlZC^g9@0Oi=Ui@Q|H{YZM!E zRuDg}dgLum@>L$#MKp=$VTf|z^H9uF)KGpsp!zHn^4R)mWL4EGep7Xyq5g7ngjCyp zI2Jz);bt{y5?bQFU~)J3@9#eiT)c+htYIG!7a4FKy^KC>W!b%zRN=RSq~p z?g!S08$-NT{U`6{&vWpwKp+mzm(N=KPt6+K#A5k$K-oY!yBQE_+!~;1K=LODx)wm7 z7O}-~%rHoZho@tqj}XZ^b0<^$vVF$aTt;Vu1CWIr7hOHQLTIhZs!iYU2q7ujTiMf4A#xle}D9+sC~pyF0m7yCtE=szEKA`{56dN|Jo^Z@Tw|HU0Rm5r&X>5}gc$=cdlo6EzWx&%89w3HkS!}D(od~iz zJ4K_^gx1v5d>j~{J9(FjdV-<}D=4DLNuRD_sf$kiJu|2Y^#ivuby8AN>YJD(0Nfw? 
zI@$Vz^^6S2ATde39Q?W1hv{t^4Z{?Ax_&+ldJ^yy1_7gQc5IiGF%frVh2*GaaejV@7lVW5EdzSv z;DcCokq>SXCGYN9=!rj)HfSRfrOvdSbJrNCXMu87-1MUR^Nx=$2&CwCv{9~Qu}CfK zNrONja54{j&0lhIab;T4f7>5jIA`?)q^8-KwsHp3wX^oYR>)4hz3Sw1Yi0H4Xc+xn z0%h+-7*p+>#Z03m`#)r|b7Npc9%wkk+0h|3tzS#{xS=NYYZ6<>W(++=&v8gIFwKzx zk%>D_qLEJ0UAJ>{!#%WCqb27=UCfC;b#mrWpyOl5^7eDcfST%bxE)i&Cf1~+xr|;z z7t*gaEnS1v128~8vC+|p*O}5s3`Yfc&>r3)1PdsD(R=dwXm)$A zC053l&Xfv)l)F6ZY0%fTOoh};J6Du*Qhj+LRa$Dz_YUU4Cb}rdQ$|{@;HQ%|E`KrY zc=zzfYC2Zl%8I_RANdSLTjC5o;s#opC+5$YYY{-5YZ=0UVq_KG{t6K`t4N9ZX2~Yb zdz9RW&t?eybHFjMI$S&l?*p`?@BLnOZJ?oOi6aCbw;Ud`F#xxwT&j!5Kt>j$@`REK zKz+Mc97rcADOnV9SX%^$FXlm;F@Ao2ZvbIP=_iyAZ{T+I#OM4Q8&mYgD5cf!J?Vel zP$6yUd%>!zsyk%o;LaB;di0G2Gq0V`PY8d%f^UeKosPj+`9j}zPDTG>qG ze8-m?4!Rf54F#6OXrg4_KSZKVOmOOu;j2?Ii8=5v{x#WrAOFGE<)254G|@BNHY^g4 zxF2okLv)a26*AtYA*QUdSxxmNf~s0<~u-@!6o^ ze-XOq}W%rb#>Ny+m$*C z=)Gx$;7FT-yjaN)7j*n6rJ0;?LCw2J6k1w<^8z*fegP2BfC@>bNFDaYP$BVVxj8fZ zKAen_oIe9li64NO@m_Yw<@Z|ai`G9L9UV1~xW5F>UudvUS66>}vG+rH5uyAQfaGJ5 zh1J#7wvG<5VCmr^0GA953@Rm09}$Kh(1}-8R%$+d+Fr6tb3m-LQ-@a*NL^X8UHARi zvx9YbX!X^AU48|?JTyuq@^f-1a(Jz#)0OGI0b%+bs1^R*?Mb7GYxyAI%P4*U0X}QN zdx|mKBX-y4J3z^*qo+57ibeA|iFe@ZNCGLKlA~kNPW*U_Y}E?04-6FE+1cSX@A_c% z?`AP^88Kr9Wr5!!Bn5|t2E_FrY;cA;of$Mj z!vxx7qru0~Q5V$Qvc@UO*iRMNkwZ^%<1!AtvkcPgCZcd?9`c_-I6vvkzcGuE_S(|p zanzo7nU-$yvX)GfhNmIeV(S zy$Raj5BjPPqz{tkRza7YV3(}i+=?1b2&9blxj!ZDx5Jva>59Qd!cOtC8lwhm@wY%? zh>eZy`qTpuoOfKzPlMOP|DH{7H3Aw9JFe(wBXe(ze}5zXjRQITn>uelnXpt-OH0P0 zRYdXn-LCMT%gd^|TnMBn^tk(R_JHwhvi^dmC9)ZvOfv!V?8OIx9yyiC;M@LJE4Xjw zb(Q&p&+P#;c)osxNlQ!H_wV603?LAD$*-n2O8?!lk+Nz1e)og`Wn)&Ee&>-;JmMAQ z;FaRsHs*WTM{ooVOl^%O1)q&l7SDe3fORyxKv_|MY$}1W7PO7u(0eTQ?Af!cVfqId z01<%i+$&CBv`~Z!KElD!eR`+FrhJK_e1&`@Sitt)I*yVe{>!f1wK%f1x zH~}CgqKVA?W)<7o+L|8E1io}y;zHv($Wv~?@5x{LyNIhU`wNQo zhPvccmIte0`RWn`PCUapHH9*TkHmTyAN#b(|R}^4`(1@e@birq?svL5eMfjSufz-9^8|-;Y!gId(jk9F%+;oFro_=^?1%lZDL&C6r>zHrJ%++UKW}- zob7%1&f(oA78J|zs+w5L`?PDLa(M5}I_Zg1y9aYd7%3fw2Dx1Nbkgyga{r=)xU3kq zwJHc_4iTVO@_U-oK6%oM-0J+qtX8)+Kd%5R_U-Ysy_=)Pz3aWwJ%9)}94*vSXGa{> zcYoWcP34mtWi@^QcTRc1bufkQ2Hzzx19|^Ak>+qrTB&;jUtiR=7{T^ zTtL`&n9QjvI>!tpI|p({z!vMU^l(LHc6Rps!UEbUkcEJz&{C7l6M&=J0tHV zURYr45=by3dl$`N*FH{Wii(Q!muI*4MG+6zC|S8MpAk}44WNX@F8z_a(9D9N1qLKS z__k{>ETTFOjDyisZK^cD)fV_?rC{xNu4`o($0+P+=sfWoa7}j8`~AAiX87o$4VGSW zb?tCPE>e0Dhl$*9WL6G3kA|V3keGftM9uHas}OYAUjKDdq`1rnpvC%@>L&h@EFgb_ zc|jd>DFVa#J;ER;D(F3 zvq<#QqrimqOn_c9ABVS9zyB8^f-WJFkT+1lWB0+JQ7o zOiJXS%cu-z_U<+hL$;P;Y-DOnsfI|e_XWRtL^zYaApS=x2L)?xpHK4cePb)ta2w%+ zIw@Alzjl})`y3E9G;M#C2LyNODp0UPUrP7wJ6!Beg6}#3Nvxz6&N!RpAya6h$|3!)JG6GSTwjtpv7nfHrxL zQ9If1+!F6rq)DN{iQ$i|xVrT1v!-ICY9=d7eX0H5%8#@Y$Z1b;%BpI{w_05@^BsH@ ztl=Je+QelHUr)1MO`jNV=4s0`$9W;NVbF&h2mBn5nCidTGf~gXR#<;~m9yh2?rgKy zLaMuZA`@3svkbLme90BLU2&(%3Z)d92uG>jSxYl6VHtj|?N;F%%A50!W+k?S-@^o5_?Pwz{RA*kcdqdu7L-ps6pN_eC)VN(W;v2|4ww&EX z^pheTA)Pj{Woa%L>_J8j6vVpZ&3Yoa$L`#~wB8*W4f;GBA0O|giXXqJB;3~3*B|D6 zd-fENA3Qufv<^RZu7xDYeAe;>!~=To(=S-9dnKXgf1ui}!55CxQnzrM#n#3~URL63 zrZkDIyN4CGMt&eAIeyREj;WlZB{<{{M2J~r5?9wE2 zO7DKNnFsG=zmwd$&KCZl77*t}Frzy#gr2=jP^Y ztS(dx8_d5&JY3Lgi5)P%J$+CA=AbH|-Y=ceYPY}^kI=sjl$`0zpZ6|w%_T%MCFZUE1SJA>G#~}MYVf3K-yYix=sh@u zt99)&!?cQF2yjaai6)eaYTwC8{b>~oOIEJYh^FW9yqs2(YQDX zKt!Nr#J&Ic@sEgyS9ED{@&uyjP{H7b>%B*=-+i@}R1BK8KJxhYSRMo7paN8Iq0#Cj znFRRMW#8>_?_<<(=!2~bSRdCtZ~vc+i)*X9ElQv+d9x9THMh1l-57E2Mj*ETg5sq) z6_bf**202eNys4wt5#9GOMz~Z6$+rf*iB3S3wxxeKy=D#YZE>q0T7Ih`pTt0_S(#X zdZdZ4PghUg<|e)DiLceKZt*`deE3r>-3&k!V8Pj_D<{@)lxV8+W?(FRVR zJH%9TW79Q+##h&|8niYsvB{t_%p_qW%h7L$zF0=j#=VNB2JHCW$?c@Weqv0W_wY60 z;D_QV6SvH`lYWLOO{dP>>`w&;hp7aAa`>|{EEILgj{7bMT;0ZF&UJg1d;FhPRXKP= 
z?+gqLTU;+W{WL4u>hM4yUGi*eJ{2b?HmOoxd!sMY#XQti_yvM4B(P4eaX^p)deA`q z-_yVpZEgLr6wnZUc?z>su^5BjWR$xV?A}=dolYYoHgfWg3zq^`pn%!-3%vY&tEbMK z!LKFu<3c0;m0Z5%$LmuBw7Q~|i+Rx4Q=1n@yf$#jMxb66OC!3vjAznPhV%v}NAiMJ z3t<;SGx4F-1t+FF(R2dBrmy!Zp~X8T>!(n8_hfv)DSDoz!beIpN%ly(I%`6zC+}H% ziT3hJp!s2iR4tm=NrBR;G)u)#$rP)4TB3(3wVjTTFAeKGzU&DH^jjyOi#Ie>Rx-Pq zK}P8yu9I|_cc_4h*VsrnDZs(QHN1TW#9%fky9ToB{Py<#Er60zr9WnG2pSJ5h5`uO zTwUQBM3BTEdlgmf7?XnDy{ld3o`7Afn0fEpae6v$YJfy}vT}|JbGNdt;Ab`8FXkeO zX11Z>P1YFV@&O`VAF}xf9hTM_G`^b0M|$C|!ts6b|h zTA){X6kwwG4=)=XjLTvK%S6-^t1H3j8HY*}W18T)09+OjeoctT&uA)hK~nhW`QMM| zJq$A`EUA!zu_&YX4E9r z#8>6y1dM4b(^ZQ}(nRk*$h(-ZYQj3Vy+h$Z&w9A3w;bFCI7oP|)%C{X?-~F2a07P5 zZlypN@7|CAAD-1vkT1x$?BKmiOSHpSJE7$4@bvE&oAzJMygX;8D?J)fmVY{TRHtIH zH`d7clG_FKB4(8^xJjL2+8<4bF)p~=(4jVWEPY6{xqaslY-Oy>BUPJ)uiZLi-Zv|4y^2niIARHcc>0%21mdP z83YzGgsyIbfG(r!FE~XiY|>rs>>M&n6w1@k?-}tTOy3}uta7B^ zh)r$3P_5ZkZ_kaK9L<)`O}wd)I;n6@Usn$*Sz_gAW--kJhDE%5%uh=a#6+Ckpqn_YXbkbMjpr3%qsK8iI7-~PD$FPlFpM$OyJNh3q&-c1 zuV&<$$I8zyV3yBBc2-@6y~CiK+;r5##Msd}dajL6lk_L(`#|YG{^L1QSR{Nc`Se)k zN}OSz1*$t_J}x>Y?b!DtF@EYmw7Ass@VoC32en6z%A6tyl=Y#=g|qV|PoNLgO40%3 zw6Y*0FjT_1KuOg8TG$$kdlIvcBZ_ z6VGD}-ObQ6Y;2x^U&)CEe)~6UVH$TTWakm_bD>jfa(Pm_$gahMu@*oAgVuFQdowpA znNJ2oH2L38c{)$mT6E?ua^Gjc*YJML1f3J4N z37!4m5XAz=5E-(+ql?phL*;HVub*KSO^=00Y&FU2y-{d)0b*GeS1Qm+=awbtH z^bf6J?`+f5m1fH?esHU!t z*%|#{f8Tnpa*Vjo2XHA2ZG``5*&5Sc>BS>^^$w!wJeqWcOvQTPu1Yl&9IPkGl=|Dc z06Fa7HS0tUQr1Lx0{+qubcL9&~ZDN{a&SFV!0C6E96$cJ+-f z$KfOPr0PkQYZb%+Q`hGdjdA%N8#C(9U%)DH5F}AzJT8zD3Oz07o7115Rs+&Q0l3BU zUJU3Mwt(Fq-QW4v7~y3xQ%J_&3VDDlT2Ke{y7vA9)r1D^@GsE}8!@pS6rD>(kUBJv{NvC&7>Z0|E!!W0X{yef(+wt z%8yZ&YDi%loF<@n#OZzPPC}SxMpOJ=lFvq>(S9tq>5#--RX#U*ee^gF0Q8v6)0M@lMOtql`oOG=rQ_on1wZMISG@Y<1 zKnn(_zq}f)=V9c&U2bkJph)1qlisKXME~gqf@q9vhTzeqHn;Z{ZsVqH&bnM zsRfHN8;f1uqlgKV;Ir4_CV4UA+hu)qcumUx0ICT{0rT_oBDGNlidEdq!_HKgmW{a& zZ_ScmIWBF0a}97M0Y>Q_&v92RDrwZ{emz)=2KQt+aHBun3@aeGRV)rigV`YdL%wVA0xA*UW6iek4TqxFT6! zsnh_ktG_>ftj!D*st%Z2jxGf@Xsw5$&)iqKaG-*-{S9*6XaBx@dI8YP*>!1w+5y1D&s=S~ zrWTzO$mwxR#pC6ZDfBhR3SQRr_0@iF?71$fyA@=Epn9%K`890)VJr&cX`WQLSgm0cOV2e&ViXLa z1OiET67KAfr+&BPbKbbTvHcA+VErm7Eqb2k9*@z^ooo5wnWZpL;{qi;z9x*n0*JkW(SJ~l_7xKlq!E+kRXH)-8mCyAH{k*{UG{&irwU%znRTXK9tpnzw2iHRUb~dJGxoTutV~DS3lWFVeoLvF zeI@{$1c!_?fGZ2I1xT^h<^*!beB4PvlN}~BW`~gAmf`J{p+9Bk#McbQ3W=>dCHUEn zvP}c6kJDEw&~j4#@wNIX$&f+G;v&3HvP9Vvz zGl`E*@)m{bCF{~*v{>pLBKO7rG3Dz|adJD^L}HbfJ5!Ra2S1KzCT_3szwY>S<)5HSaK>6jsHLW|coU@GWs31xla7|T)-c!BU2~R~YELNL+k>CGqd>0uD4#HA;S|C@MM(WiRoC@toKL-9exu=Hjxf zst-kmSy2G5DJ2Zy-5~HpIokWejU9j=W!S{wRsCGm-C!bcJ8_Y~aw0Ghe3M!>z9odOo_0313>c3knLHU0u=O z7_R|(43wLCmX_1eIHY`<&~FCB4;;~-1vLTb{q3MMC*{sKmo&J&p8VvUaMqu&-@}>! 
z^i}Er*@oov$y3oRVtqryuW{#Ou%|w7^0hNPOmfR2J3q|;n@vJKgSF-g;r5Y2g_d5X zXrP8m8;~+XxBdwMT{mO^DInPucdd*JJM0SzlJSLuimmUP+^k4G-Ke*~u;EHh^SAcI zOsGJw5&M~s9yuWSW~5hbwD+Ngt$p6Wt|Ia2ik5jXvr6|_F7Jt|9wy6r&YGV;a*HO@ zk@*>=O9H;d z_{QxNJIOnd@qLfpXM0o~q$9WPj{Zp|JFB^++0ay>z#xzJ4lMXrAOn0&V5L32#+5;7 zUZVGTx+gZ)TDj0n7v=RAwO_`?UKkiYl{+5dQLN5t?AK-@h5~|e z9Nc_;M9u6@zqe_&J(B46M1B!}Hz4qyA^Q`gyjGU{X{sJl)77-~yWD{6X8>!}UHN~{ z9<2D&d><`Yq??6YG0BJ3Bb3%6$h}%1DhlZ0qiBK<#A=F)#vQHB;!PVq!SFtnSYCt0 zp)}v*S{RXd`j}|+&nN|zs5z3RR&ig@2SnpBGi_Jzr6IdfK?Dwl zUuD@Hu4n{7IHF01){K`Qp6s0~r0T@_4;25SI&31BE3(VlrfM8uyfL<^=D+J@+48b# zl?OjoE01Hsp_Uk4OWQZJa86KM+Q1P_TBsb$j4KluEXSW!#&Kei=$qU=saRxJZWpWk z457R9C4JnFcH(mCTGh2gr%*g0M_4aF_rKYW0{Xd5B0Q5aqf)kW zC4g!n6qu!-FE5M|#cMXcxla1EmfQudW z5Ev4De^J3O%^y(7|D~-X`}3@ZG1yu?^q~@u=T*F|>i;54>51b^Zca|8&6UnrB4Ew( zVb=vJU~C)lY`Kr!zv8VG?ke?pU!7}=YwV;tl8{O;#}J=b$z*SVuO+ z#Z_!faTtq#howPcbp0mpKGWLL^}0rd)J%@4@8vF>?^o22zVQMoF@7^o#sL?^djfM` zBp$&7N7wGy_l}hzbbk&Dbe_;)GfnP?op6g@O^pO2m-Ha-M?#SiLpmzsJ=<+$eE8c zlLFl}!KnJ$oGM;kaREamC$bL-#xr5EXfDg;8*59q*4ETVC~WW5x)F$zMh%cx=F(=6 z-CP@d%$!M9-bYveVIkJc7E0C&wYNzw173Z|4jXy;VS%E4*ole?Q&6kB08_pB$OD1P zlr2w|CFxD5DMNPCqMCBm(h{DviFWn}(s;l!U}1C-m+86yL!hkelNd~dVWE<(KT=4otYXA_urqdf8U@XDHd^Qtfam>~(neAMozu>p0)6sq_3Q zV7G6WpSTPI+BBsx!!TCJksTrP5bDZNU5a#+gWjiWX)QWdwBC5#kE1@vtjG-erv`klbC%v-G}j_X#C?FSxfnktZJ!cE_9d!!y=GP?2lV;o@ezZqftM`tnwdg#CdG zKh+zWg-aLFr=l#soQslcr{9|Pxz$Ebr4U`CR-RhZVDQ^U;#ht(ONs!Hr0A8`e z9zV|IqKK$xk(aQn@4&WjPzA&aaOFM@nGmp16@$#pzS zZMz><@U3SQ8&UGzVCX}Sw(g4T;~MpqyIGpo2))-cX&PMGwcvfPlmCWqbn?pjiJ^{! zpVj=*?7JcZ+5Tu%$}9bW)z8NusCYTs8Vvjkv;-8PWDU{PhkE@_KT-y=c@@&33AM8F z8vY{(a`OHA_iOe0laN8qWBX+YyeE{eAQLu~=l0RO6@9&VviCaq>0xr`!K5T3wAxYATk z_Ytlfnx}`NBK{_7wAjq2;UWZMsby1mH(AdWvKe#^woN1)#y0o9usZL~kz995d{lSK z?FpHs+o$fkpRNe83A$e6C6XtUm%H~l`Ba9sogQ*CBnoytnD#>eFwl=8eGQ?(pS)ZO zxXqjZ*xtK_58gUI46hCF0y*-y>GXxVdI!q`CN{Qo==Opp*j1F{?6Bu6f*i`na{JKB zMHSgE_vdGAgv+$j!pys0o%-&stkGh5*)=BdAEck~k`H4XHLWs#rS}g_NLLPHNZ5>w zVs80fJSSnOg~`dHzw|Xl?mk`no5K25mcqK9TW!=mzqyNrM2}X_q5(d(%VgN2rJEL{7}> zFq!R-YtbanHkh>7Hf1hbsg8WhW0Nd$*5!}OOZi4HExB+W-C9CQ6!?XQLO<|((Cru9 z#Z6j6+lpk#J#D~9hPKTv%mi`(=}y$_&g6twhfkA$7lo*^R)ai2gFU=RPhPa|1sys+ zDz#fr=rE~f+Mp&>*3j`4`@5JH$Ue8-S%~o3o%@+zWC$+7su8ZWCi#%Idyl9#Z9eh7 z$ ziB}Pv9%-b>MX?&4PVQ`79U-Qtxqyp{8;vZ`OqT;>U*vDWw3_q9oXD zQ*j7Bj9aa=hAwJ09^KqL-oM-6)>7#hm|FpzGEgk%e*c~cjDk;THStu6?kyF%2YMCGmW-fBO7gGu^?-M^KB1Dz9t4N|Q=1 z^YN4EMNaw{7z5ID>$vYt25z2k*Yx1!-f~P{ENo2LH z3ib-5!~tF4yROM$kNv|jMGek)6 za9eRlQ1h(vg%g>w0oHgmHvHcb%)&wa~V2{OT_0qha= z`?C@GQccJ;jU`hB8ginPRYCniVwe_1mj+_IJ2|i1P*(*46*yJu8Thc7P%N_^o@W$0 zeAZQg&@7BeA85_eNuk9LGnM|$<;E#@tYtyf48aC0dK29K2j)_k&I2R}3&^W&`mCUl zesW+KV%f7w^~(-SX=bl+1?IwQNE!#{X&!&5kv}&b)(57mzg~$V=2y{ zl zUYTszwNK$QF99-5bTzn*gAjpMwd)Uo=E%)Xf{I1b=dr1YiSBsohW0%5Z$}00@yju*P?tn>WP<;rT(}(1HLS{L<*w;5$yh*>MIPl)MTbr^TAunl$n{ z1pf0HzveO_e$^C#+<4>KU!=W0hbpTS(PB>U@OsuoM`~;Pfr7pdpQUtDf)?8cnkoiz z{F(j3gbxp13`30x?wyp#Z?Lgg-IG#Cc5*}9Djov_qXbu3LmtiGBMotcGIMO$N%U@WhLt~--*9e7#*zoburCXg@Ryphhor$CIs+&1%_p?3?sARi^NqUKoD_Um*os*V3uC|OK5O# z9=13DDyTi-YJUZB^1I(Oeno(VAWjW^gM)LFJGE0M>^!f!kYArp@w%E;R>nKkU4*8A zi|UK!(EP3wY18Nmc&AAtt0BqvkHz^HZco{i)v4e6@gR9lXL6dq7=K5ZCX=yUwBli< zfQjyFw$0+Fl9E1OMZw$w{0xBTkQh*deZ91slc;X;Lpr?|apk+?s1uwZ_rE?nQ<9=( zYUxLKj+0Ff3nO%GZ&74B>?-w#}L6;=t(9X8PL#t@%|=60>U88K;%WUiH?)g zE8hx3j*bz~8KC7V0N#ON*R`tlfs32{er<1c*sh&E8U}mYqUgSWRn_z*ghvHX;!@Lc zb}|Zy5gX4=tP#9q(s`B5s^RbzPKXFb3ltjXsz=^xO~U)igDCU;=Le zCHuv3$}QU31B=E%5CUYt!bd;>qJi8)7@j*5M74KI3>c&_a-$qSG`{nwK$WtbIuZ1) z%0G~eD;h@pyWQ~g%ihvY^aLjoCs4eak0xaYc*n+cidm(=nXXXG0tPjS;Uf(d9i 
zKV7PklUC(pH4+FUg6pNOZWfSTRaIrE!N#st)O2{##nUqf&?D|&z{H>6|3G+OK(S&$ zSJ8OT!hvKWX`-8ea>Ko60u2_3UtR$%UI%2i#qxXua2+5_fb^_$HdKtN^bDTu7q5co z@jnR!^#!;O2tL>IO|SyA0XilP5V^rof3#Ma`E{I7(IX~td{`!#kg*sbUQ`&$#$FAY4KWg?dR|FY~r}W#xFW` zx%0>abtacdh%RkaiNHFA19kZuU-j>k6Ft|^2MXdl+`tgI|F*;r!^QV{SRsdQdAe-2r;vh}C?KH^b*@PtQG zv9-0enW16RVQooCf8Vntm~x%b9FAF2D#ZZs;$TM2l4IPmENoE%i~j~ymB1*+Cod0G zE-zIz87jPu50B5K6Px=3usmVb(dR}WI>K>DZJTTV>WIeEY=B)@pu}Kf@`M|$GpVf~ zK^L~xU>>)yw2#@}msSXaz0-c81r?PDpVZqboB&eP-5zMF{8+aULd$l_GP5&h)umBZ zwgp?n?Go?K-u*}N4=;^M2i44C{#-{xc6Lr`QOW>+9Bv7MgSTz&+W|=|gkD`)!EqT| zLE@q#GVQa;NqsZr$B(GM*W$rD=90<6Oa2f|OhJopo1X7@w z9zSYvlm`>tQIz+AdGHGVeMI8)I`7yzAF6hafrY|Oj(G_T$r@&(Q-Bql*&QT z2hupAJFz`QJ)+oNfVV)|f}$`>_yI$F2QCKR;ac?j5`)$+IR-~3?xP&i%ohPi zph#RR(3u3Ci8AqJg#ox8(!+d^GeH*@mSm$i4SCN-^g8V|{71y}_r51f^`=x*+^R$? zC@Y7Fh%92B4$)le+I-y)uie)4muX7dEU^V3xTVcaGbl{0HJ+Uc?~mIFLn#GWm_cvF z39XR^SmmT5K|^NEh$N6mR{;n4-|<14*elqKQH!8~X+&t! zEnv-h;fjNnNEWn%28SJ*oC;qNpweO>%LZ=8Hpsr9HIvvgX+NNVBxTV2;{uc!q1Muv zgh8%x4BDW1*pyOX>?8FSSaJo2Pcx(SXF!qh3Lteveg<2=7ysl3FKBGkRkUwBHQJTq z-2Lub3zA5EH zYWD*gWYj?%1NJ(JP{I0P3qxeFmVho`E&6aCq?Lv+aY_ubQhKrTQ&iQsI7YWGl%~wT zAg?C8AFiwjv1VM>=(lTryU(z}FC^9Z0s;eLAKB1?IH75EC+&^k-Nd`lT=3Qik3;Ro zS;T>;pMwKY0pvWW$(r8BN`$AMkKPFIZ&xWANU`6ymktJhs@gk99!n?vJ2i+Zk;w9# zq}&&!=l*>=iW!@16j-K!+kAqTG58q)gq=jKmBZr{lt%U`c@H!Ka=rdZzz*R}-zy@R zbsaV3{}q>mdn&PQ*2u64W<5mmF!cqtX0YnuK|{p*e%DV1RGTo5R`Mq!WVj_UoollW zwWf`f5=?O+ipt}23jt}Vt1Zesj_Lz_l3^mU6d!>TYGBM^k{!R^`xKR)#sm}tKGJB?_05Nbw^Q69_@yMfH)Jyfe7)ebg!#3d(-%E?RgWxH*x71vnqLQ^#U#2GhA<+sQI68U zwP<5vb-O2&ekuccTgjVoS0(yCy>jZ%HEJ!pG^rbww5xKz92eIWG}jCr3D31NS+5yt zNaK|X3gV0?YqU1*k9c98>sQFfaMg1)E3G9-{4t8W65TnUe_sLh?-nO>z{{Td-q6!O zI1z;weqG)9#;N2@Ihrt1BIJ%pj7CZh75D7-uIi*!=0C+NAsNvH4@KtL2X}i1e+_Hq ze?iURa~2nd78Yv#IH#aMQR~8!)n=_5cJHDT?V+UdV`d)PkM$&1K`L|OOLG1|)*4Fo zmUPJ&5RThopi6WvLC_5xFBb!R3jPN3Uk3ZuyW(_nVGI995xxI3>oC!qDKR9P>-G z+WSuq!(8ra4Hdx>%y$>?y;Ix}Y*SNK5v`sXjfji{hVoNdp1pn94WbLo4;}OD&gcb`BhtA4TU12u757(pB?!Yb}7o#cQ-~= z?4J*3vHT4qZTvUA?o;(vQwajkZL%^M4i z1d9tr^3dsgdD}gFBFqw7%(JJeqazrz$xi!kB&1XY|H9gZc5vQHqa}u#6j1wqr;*dP z)h|0>41#W=qq76lryJIvpfo0A zLr_Ko$@_#;Fi0_Zhn-EyzVbow|3W5Yus9D+#Ty zyyE}-7L6bGM7&{seO4~wWBJ#Xp14QN=P%EjKPJN5x#qVE@*aTy7n6iwt-gN9w>*OH zRgdtZMR|4u2x{3gGb!Axj@ub|Hvi}wvNBSSCThXL4~w@^dwG9%4#7VvyR`Hx3Uw7KE#Pa|ktYk;0Jzy@)YJrQ z2iTtffdh(Su*vA1>{C=y;(*u!lh=PCF#abU4cKx%LmBAE*bC5o3iJFne6-r98RaWJ(4d8JA+IXepp5xfD0iAUjTkFL9>P`~Gfp zC&Z;tv#qJES>D(H%bR!<3aqbcE?@ctmW)}i>->htX`X#ue>MymwTNnd2Nhg5Y1PmNJ99IZukZ<%TY&;$< zCfHRiXT^VjIO!4Wx&ok~t^z&emUF%9BA7Za_h$la-uU@h|1sQycM!ps0_|N;v+@A8 z+?hYqh+Gm%%>Z~zA43^y2I84ZZM~^#+KZsTz+v>*(ky$g*d+BTYuQ;Sv=|!MoZ}8dXMn;wV?eWMuEgN{JuHMpUob@EF z;$5{pN_6$9qM*LRFMBY%OwDL3#)e;!$#CXZBc(4^sI$NuQk>I!WPH!dn_5vxWRDbQ zv++z6Zs&g#1H_;e6%XM$Q6tKFB-Ez*@b_F4tM*T%is5bQOF!eVHgax6@fQefIuJER z+X4{$EMnNvo=3|WUq67P6w%}yj14!mf|Cf6Gb1unL~Y&-aEaR@4=4wy857=}4;4Fo z1W*_#T!X!?3Nmd(OoD73=Me~qaULPn60K@m(FessCO_^N*nf}#4zQ?FI}MD?M^J#72X}M zU1P_{Q&7{vzLOPiF1)W7kgYHa^DN74Ro7f$7RjCY<4Su( z`T4H8h1>9^e(|^;7ZsS+Rm5<)k-%poQ52o-^>&h_FU~<2QuDEEsh$_2ITQbS+$-+> z@@YMW<0F*ps+i7FsU;|m{@xZOu*I%d4)pi`ye>_NIEgxr+u%i+BJK%m+r`+UqpG8o zZx+_pi%{jpUAxH3qFFEpwYzsKH3MQ=cKJ*t7F+CMs2FHd?2~yM!2%WOj^0=~D5fg>{z0<)q7d)dEVaDonQNB5tIbbt2Iv9D!<%ITRdyA`uw(SmhKDD- zK3PL?PzZ2T6b>^C$X$IpxW5OU9jH^ng?C35atFyQsOh6O940CfFPMg;j;8$O*V}Ff zq|NN0Kc0rMNPqQ{_a`-dz$})>L;!3I2weW|IWbd6CG~~<1^smfWNK%p`;BGgj$fJU8gOEEtPfLnkw)X%CW+&?BH%t6voH^{|vLf>vqHp zi<+lL+g0Hk482FmXGKux5qLNoHD;5r>W<;jNp_1`k+_;VonOk7EJhucb-t%2-YRW& zNHf;<^iYl3naNr5;)~5XubcRWSBZLWuY9oKumZCrtEsDxr}EU_9uF118y-?F-fXaM 
z*t>NHRBN)}Mty3RoW5}|I~sA%bE?*f=MGfv!YO!SY^Qxl72>>?_xkS!I4&DhvRjC# zAUDR^{cvIlqs})dBOnRK9o#VBksL~!Ba;-w#03_Mo1~d23_bRnoJ@LtDnOQXz0vx9 z;zK!bw}lwUIOdWRrwnMeA#3)oea}g5FV#+PQBoOk;5dLzUZ<*(NDGqd59X>Of^E;z zqjFkO0u>1M3RcN|pLlqPf#25U&a0t@_v2qy6cUH1Lh=`p)7ZR=8GCIIX6p=6SL3lV zi>{=*@#}`J<_?B-Om8IRke)b+GyD_NTn=#u%~op$4}Hy9h@^u^d_Y-g0*C+4v%UMR{N4>zgNUhNY7A zmO81astL~qSsvi~dVP=}W9rK>y|;@o^_EuSi8(Dlc86${;L^jlDJP&TKYvd46-r(! z`{{vW>ec1v+#aDhjW;EO9T?83@l`+Ew14j3TOh6 zTKad|38(z-NzUQK{gzN%=~H9Q1Jm?*3Mt} zw?E+3!8)L#_fpIpGT)rM0&w>F1a7dv!BB{~3(vL`lZw zLSUn67UvO=ipDcjKfS%yJ+{9lzXKKD4z$-yqE~ZP1rZ7a2SK5s$iX58y?3DN{J-H~;T*bTy%hF(5nd z0aJ(`$e|L7>YyeZh5P_CZxLS|tGcb${2;w?*PU)Xhf&k~7qwB}3LE06kN@I<;#y$9 zJq&1aL{y7}WE%NeA3Nt~N;=tRT~a08!;kO%@A9|P7xy@tuGU1i^n%92e}?I%q`y!{ z1XDH=+k6nbmqD15SwM(@eXM^FE!1KpKJ$1>{lvm^+rT(YzCkjn{|COzx zjn+G}No@yTm0peaXI>Bz8g-a*Q;bVF8c@o9xmiDY&}l%jw(=4a>Je0!>;K*309VI? zndfZLV@bC>U_Dg${D@839tg;PU>qKfJG1;KC3_k6{kdfT#zdplr57u~$x89h+E5;QG$9BAuKny|&`^(L)2-~~ zEdFQ>QDM5Xk2|n);E8`(`mt~S*cb?Doc_!Xcwgh?=T`+)CH#vl7PwrnuuoT0vF)C| z{7S3!L5aaaC;(79Pg3_g8%1pWc4~{{KYbjLgeF6JtufhG?Wm8}KWn(ER1~;qKmJ7< zPeU!^oo3GT`f2txI31*bf#m$=*p+JJg z_fBa3^`tfo%ud-{+~fr!4DpiAwxb)NyA#F!0+0b%?-`%jj^hoF@m1+tZ1S+NfD&KkN;c@x2Fd4N2hl?nEeu z(kMsLN(A?Hty!YYu{DLs=bA!v&+bV-kj8JBeKn)LC^Rg39n&d!G(Uz}UL3z8r!y&m zI1?c5AQ1Qdg=$TDu?@*r2EJCM!mt(EHitVPAB9r^;FJOe`J6>S-xr`Te0NCIg5+5N z0DH6)_qAU)rjF=LzEX)FoaqPBBM0kYreZVq@}a?rT^;pzlw)u!Z;MNh(F)s?(j#u> zb}FRt3;WkBJpO}?pbaH_vODlWme!puK}Lt<^v1;ANy)j(OaW12?m1DFQ6r)n zokBkK3sNB_pqT<#*??3*dU&So?CNVLV$s5(3mCfW`S15fsP=JR&)9O8C>iJqA3Ct5 zAE7a{JlLx2$lo6opNGUvJJFZ)1Vr)YP$WhF4@_rFddNSg-dWo#<8k4wNmLEl*k`@e zAwL0TMcv#-;W|Wt6b7&N3Co_~LIhgqFd_d9!+Mkn12Rsut<~x_2_g~aR zbF?)YOE)Oh->YU&s1>b@Unt>Tobp|HoiFUxX4=>#O4rA6ZVX!21<=@WIkZK7q1Hkb z`Gsed2F~wAY7G7iUQ?5vid+8F4+0|hIi1d02Sd-E0&NL51A_*o_wuUM=GO@~8k0+$ zc1l$Ar?QhvX1*C@VkaDT++G{U!RsZB7!!}l$x7{3l^BE&jc!CYWHE>ydz^*-F-z{= z&keQ^3=H^S8!9rQp&T|aYogof@+*_f5-T3gPPvZpq0iKk=?}zv$hWn3lVwig&4nfR z{TKRZ508iA4tFS8o)JmnBVQG+H#XWcNP8YNh&D95ya+!ZJi-M%41tnA93QjrsQwNe z?qu=^oL9rTy3RKfYnRy2>0B*M%LrGYd|3~8Df2B2A~|PeWxUewi}KD~jw->;U-ir0 z+-Kz}ufN3{tcBkn=+z&#EW?qP$CBo>!&3jjQ1yn~L#rvuw)8MGP3Xy9;caq#Hbh0uc*JfN?6jL^suNpRCeidpBQOK)|=t zSdN1sT!rWcOFj87PFD&Uj;sh3O5fcylFV-Y^ZkMCYj}y@irgGB$^6FNFlUhY$H zbM9U8-`a<;Ge=KjIv>jsQopcQZur)SE=j45U4HRGK*Tq$csGqDzCUm@{B>96Zrbmv zcLzeJEL%zWI~VIk)W8ynU#(HB?9&v}+ek&}`VE*%&at zGhcgD%T{B$HOlwqxueZ%UMhIWy>$=JAI<4}zaG<>e^P$VRy2RktrC3bo zWF(8lX`9&gi}i^lg2v=SHZ}oK*~tvDQo5lSj^&oMXMC?{>zTG$>zTNVsO$R!w^P+0 z*>e~qRai#bGZ6af?b3{L5ce6CnJA$*YwO4{=pd^vV@l%oIqLDC@?>ko z-hFlTWtiQlUw@ds+WY;Kdec|B&Nh!!f;EmX0#W*uQEKT$zvXXp4XXqTMCg}&V)`jR zY;F-TO^)8`Z zFAe{RMHDv7;*z-PSgsqlt}|D4eT065O#f512QT|a%~Y%3(FA1mH9GSNQMnLVKYb8U zBC>BXvz(0-j<@Xm>S}$E;*4L?~y(#rk%`vFVtBlTUcAs?SZ|6d*|V!hnWzL z{o+BTm>k((`cr6zO%Ih?w|=zB+OdE#-LZC;Xzz-Z7hI&QnR3D#uCiIm%(QPfaTpYj z(w@R5O)L~PDx!CKaG%3Oxuh@i|6?pZ3+fJ&6;^iXkfU$H>?V#FgyhfaSfqj zP*0VYlp&uU_*Uo;QVkwGOt0T&?1`Njot&L=;CyY^zInSxBszR~VA-HC$#Q?P6XQqi zAj-4odnZS9jZ;J4m1|Z>(7zqfFqFqhpn570 z1!U}28;(5oPbE_#AEDBVyQ&=37d#u(zb$<@d4CL@j}L2H6{lOFl&7cI!-l+SM%{G> zlt7Sf4R6f^kyqNYuO-7)(J!PR`|Vq=aOV5oBidO5qb%=K%8?)Em4`<|;X8fp-9is# zD6%V~8IoPt3)jlYaFDxuLgh}*Caz=qhuGt{3jmmGXNSaw9%M*sEy?#1@}mKrEfs0B zZQAQP6iuKuImKbm3=Ro!?pW7$Tcv~XwgCS~zZVqV$tz7Rzi+!My%oWn z-v|UHJZm=QH{Ixds7J<1iWd%)9l8WU>(4gNdRZQ5dF*s|QS?hYN=qdlRXq*IlieR_ zwviRB`6sS#Q1)4=uwjhiZhvP-jK^Xk%tu}KGZ+0hfv;B@4=y@uQ@ETh@N>;8RR0rq zg<}>^nyHsuSJ~pIT(5b?HD8tB;J`CHe#Ut2??jnDihnBOLkH=?>G2{{UdI^Nbha!H zjGp#NRIRSWkkves;@%6FLkfWv#~Y4tA^in7n{>fQxGnlc6er%Y7G3r_W+A?I+{$Ir 
z-KC2wZEQU~^d*Lj@1Y8rU@M9->GwW8>Q2j7Mj_N(TeJb{4fwD#PMtn{Q&4-gNIDDQ zip=Th-`1YF>nDlCmxGnAjt$q2$4-2VC?_ndO;i0p|A?K2g=iYQAFJ5*#C;cf2al-Y zJJjFD3|y}R?1c&ZlvRA{aTXo@s>^UyWdCnou2G@T#^>Cf!9b2Ij|sv5-uKhGUIo~c zFjX6ZSDj|BJRoCx*=BJ1Gplx^1K!`|fKJJ7aPa%LWg5NpR^bZAW-%VM`^oCP3jqZ@ z`&pn48aeo8umwJvb;TdJewHBBA5x{n+v@s5;M7(AmV!FLg&kR8?}my7a4MAbt1Ld6 zRT94LX=ujyT}dq1W&9Ci#@~T&1v5VFU!C^L25@3d(y3E#Niwb%0M4ufy+VxyyQ{kH z`$A1rl;j@$&)#}0(Fjg`pqq*=zXx7Y794TUlE=sV$ec2;NRFs~rKRYW?}_=3wbjxA z1!e+dK4GHuUp9ZQ6vTcb79rg~*ur^Rzewe6+PB*)(RGk~mTDg=!t~nwbAU8KS;h8P z1D1q{`5tV=xTI3E}xd(i99N9QHg z(^a_l+>f~m*DI-MA61P_;&X(^=r+E$S6y1yusfKxDAy$)nl(C;rj}%3&oXNvs5qV` zK5Y2q)V#FmSYm-}Q07pwdkAj*sM9>$t0_6f*F7ygj&4L56`iE|#lT2a5jG{D=XurJ zXq9M8y|yw_evuHrto&TX>`3o#)(cf|4|spO-c!KQlOafFq?zRuFgl!9JSffnLk@;lbuJ zMgcdHlyx7n9HEUzt;zNzmOUbib)M@7s|N699$?VK-v^`al8Pf(0gBL9Lq&Xayjn4z z+Y@@(lOpT9*-FkUtD`$mVrsNwr1n_VjQ}vSfn&9(h-WyXwtzn28Dnw1pjHtZS&r|0 zFsf(rT^}-c;Ztx?kRH-o9(}_e@z0FAVtAm;G4*vp*&1JWa9&L>ck!^ExpN$*=8n1@ zT1;J&z~zdDE9f!ku0N|KkyRCy!$^_h%9ABYe8-$EhR&I)e^noIQ2f^ zS<&6(yB>#xhLzv-^*4eXh%I%8))#(|A=9YP8X~&z4cTK=5 z#7c>RUFkBrlD0VZOY{wc&O3t_SZs{#L?1Avm|xa7@GWmlFV9n+$&P-O-+fnX*pz#H z^B2Zh{xC1EO<`u)+)mn3uGu{Y!{5G^ezzh5qvMJkalOWE9LXErD! zpT-?b$@udwn@Q%^c4kI*gg3=0Svo#S3F^jHT2elngyW%k);LHXbS*tsLutHKkFCI%9y~!o6)M~mr1xv?wEYayX%UY@Ifi6Ms#W)+fdqmlbAiA-%*<<- z#yD@aAV9%5NGA9HHP>;P$_~EF+DARY-K?-wP7c<4e$lYSDfhMevCRVPrGS+*XB4_# ztgoY-s<15cn^8GQHOB9pfz2+2;tExsljv>>8|4w_YzIrM&3vRX_SWgTb^g)hzLv*; z0-V`qk^Y+9{+#dic-{U#+w1|(`rqP0*oDcZ>~&ZsP9ML#vH>mu+-#d4=u-X5i)PV` z1B16>1WFlWZ#hY!Z&(DWDK^WetFxAw9|FAFpoZ@_tRq?1=B zxzes5}#sB2KE3# zo6J(P(%Is+ON9y&5)sX@`016=@Tubc0j`EX{bG@(wO@f5%(~I7xIEdujAzO$wSl*9 zCrDcRn;-2YE9-fAJx)4uUI`JS@`TkxZhT}>TzbgxH69ZhH1V5|nmMMtMN!@YLccpy zEW~l=6~r&+*R*~=md5)Sa#c_t4#8-?YJApsibSyfeFGAsNiHhFPTZYJVj_D_ybZC< z#5_B3Z`LYI-;I%`?B!;YYc(c?$MOjLi!~eB#!k_Fdh*~F>*E*(7LJBT7F*cktx#l` zz)dBen21;}C%YlKLxJOPda`M}O|NWIt6k#6lL(5mvy~ym<5YM(s3*L1uFpSNR*JQ2 zzS4Dg*pn-=Kz`ia9f69I?3-(^yB1I=FFD{v%)J%FlcQJFdnnWeOX}N`S?XBV(?iy( zGj|5Jc@~so^R}}S931jB>cnVio>qQcSz6%gJj9J5jnx8E(N5%RM>`^O&qKVy4x&xe z|4&7@p^M@=80EL!sguv@{{Bn*ak4yC*Wc02J5m}O)4^C#nSa1l3^PUb*RNZ*f0pFv z)Wzhr1SVQa86@vCUCV06LLQ5qbfg}_qwcEv>_goR9RFCGh4a5B2*d=V4{6jouKvlH zt>5?ID1w##DWsRh!|qrC?2JOD?3x3``nraoOT~^0)EFeMTv5i-4cR#`dYdX&ze}Cv z#EHShbwK-WoKS;h_Zny~1}SYW?9gUqG%qLgr!N1%i*@SegO_;$@~Vir%;mk3;~<_9 z0AO-Q%&0L(Lxl-$(?d<|_+^zm(}Sw%sB<(Ur@!f#6Z`rg(1bajw(zexF=p^|=-c7a zOyFIg*ggIU?>=;A{-#@xzX$1$dqUz{9m~B(gQ}3M*r;m`K$^S@-POU%3wW|| z20wm@O(p#P^tFrn(4$!v&94dtPA7Z$QDo0R4uxb?a6VT`d1T120pp|lQUsEzFMpix zSm>5`4=a7CFBFz-Y&+H0N{Jy3I~Hn}6Ld~!#DcX6@w^J4fQp4TjHkRtYcUEB9 zSyIsAK49c$T3)BO*EwS`kR$jP2XNW*mX`JnTT=EiPd*2@?U$FYiclR57i7K}<@Pob z^FSuQh!y3>KN^gTOV3|l6{D0K_-Bc}Rn9FOlxFeY3c1OcJW}G5y6G{WCJD`6C^iK9 zPq0G7SyGSr`9jsJusZfs{Tm4e{Wi?1(bZ1=!POhK;m&f1e7YS0~g$u#huNV-*Yz}1(|I5|o2z0sI1h0k1& zcm4lbH*LHFJ?~$`k(m4Z4#d?&|F<;)C(VBOpX(5~O-BF!Uw@u`{^~YmvM%ORs6N?eQ{$3^=R7aZq zx@rb}K#8XKe0IsO-ymbPTgGu(5qGiXwW!!J{Q-ZyOe&>?-eO{l0-^QD3b!g(9ue%n z=x%Na3;*y@#$h}-JsW9Y-O)=T!C@UJG9{Iq;q{2yuu0ma{fHc=y+$eZ+OlosJu!(i zRtQyLjx|4QNxf>3x9$H9$2kA1xl^`va^CU5*pxqVZiJahA${er-qT9W+w;M!zyU$hUy%hoRIdl|GE1}t0_ni2;1y42l!@@Ns3HGUcls?e3a+Qk0h#O{IiYf z5)HfL_(}&|jG=8gygFM|n=zl5C+c5TS5CD4HL>t_lW1&|<0ofXGnM~ZhMyh}%n-}u zN;x>@|WlXa|_lQ)bR`To5scX6G z(lVX8zup&xzwv0JxY7xS8k3(}--I(LUsJ9sXPn$EbmJL1rJ{kkYz&nZp?@pkY^{j&80XQ>K=&veuU1XV9CslsjEd6Uiu|uHjHW z%(6?GwkwgRD?a(MqhM*h5iUY8N_cN8?;U#bv#L9-3(qpanI6|Hdg|5W_sXSrqaD2e+i~c{1yRQX_(#RumsSX+4Ps4t7Td=|NdN>+s*%>pF2Rm($z*zQ^l+-LL24$?}M<-KEgmr-jJ5EX#89jGta38wVce&GJ9YR+3nj>DoV*hlBVv^a}hkDtbX 
zqwh3YB8BQh83#llP*qpy&;8MpxEdWhM5K+0kFEUmD@Y%zJ6V~Q;IkiIS>w8K7QDWI z4>2}O1#V;jj{*tFqm+(_#T@XNbCl21}!yXTO*c**VEA2t_WJD z(l=kpl;Q961mtcOcd$XXlP00@@2_j7+4JZcwoUg17=}(+=+R(+4bvihyw`4Qq*xLF5LnACY+vqhmx<^;I z(LEYl`}!r9)@a&6#tiX2v_9rr29=d1lodKbctQ1?Pyo!Z_;h^ zeIwEnGNDh=HnQ|%p_4OFTRNVzDpo2$Ml*a=ps%e`c;_df`n%-|Yk5LdY&V5Y?~4=r zboZM?p<|jIxl~l><8eWvbs(_-iCDl#G@C?ooM!+t40cc&Rb|%;h&M7PTh+kK%L}v( zv97ZHXbYePh*Eo#mXKi020&R~0y_InU<j!|)A-J-ggJ)NN~L#ibsL;W+X~_w3m+kYD1KV4&Xfx4A>6RL z`s6wV*~oV%8hCVmoxr~^b^fkf!omqfB_=9Q4xzbS%Y_niIQGdP(JAa zjJ*DBN6C{d5*4YdeIK&ms~=c_dnH+TxLXYnyc5O#M0bXm$6-{Fpk=4rtAe}yPI~-U zBCu%+nJky>GDa)?xqi5DKk;hg(ic@Qxjvk5Pjk-t!%QZHe)rJ4_wp_M?qkYcvJM+h zLvKHG7nG)#@8VIcwKmUf`dT0}%dWr}El=Ku<$z?btc2*lvi zhA*2^MBlOxl0ICahd((8bF5W!ov_C4eYP1a|BTzqnoPSxrF#v^{M|ny*c$HF^Vwn%~u+)R6lJCPM^ma>PWY=SQy5Kk1tL5kAEc3mZQ@#tNJY_L{63 zyg7-*y>L*T*#HFOSy;d%kJ008n5J)bj(k z*p)`f7me|FKMm%o=Dg})yT`9q6&e5jXRCyU4Cv_qZvR)3&b4lRRU7d(x1@cVq)IF z;L;l;i*}9%L$d(|Kz>2%-i+Iox=h!F$~h2vJazzux4>-IPh+{MEE3cDh7M?6?M!t& zS{fgT-h0!E#(#FY^@hCu)Kll@#N(G%{?HFPt4^R z&>F-Y07Dlur;n>BU+!ff_<9LSgPolxLD@NAHv>Zx;J()mifR8$o-NUadKOqdXjr*U zBz(x7?&{kQX4A9+!Sx>In)p<+Z6om!4fypk_Kf>NXyLUy z0&ea_r?S&q!lQ(^b%qX4P_%JG154tU^B_yWcS zaM}(&m}3S-_`ur<7`p*HwzOE1$3#S5ve$-x)e~IcyRB;QG@Iw#rM4VBYAh~^I|v4O#>_93cNO)j5{c|0)<)h-1nli3Y(f#zCX zCggM~WQc_1v(n|8cbpb6f~LZ&1ada$&t7SMC50=f2F1fHo!U1VION)?+cHKlwucgN z4NRo?g)b`VA$@$PA{NmLW6 zr1n#I9kLdQYI$xD>Ue{9KnRCZf8wX}j$9s28RN{K+zsr;E>_l$Ax(l@^ab4a?rE=` zd_o6*JPIFY>wMHaRkUnn`e5McL)iwNL6^oyj}VE0YtNuAIUAn@d%{1<;LWqE^#!}R z<$BKdRu9|JUb6t)l98S5S929W9)8aGFVHAd-D%tzkk-oaEUnqN;|`b*3BhMT1QT(7 zn0Vd_x*fS9oeYb3i4DG=|5iMaV9?oee$s*l1+&RY=wzghq4$h9x zeB7JbzgEUv`(n6P+T*c_8xGeNVAzA6+3<6jU!d|)V{10RdAMNpZ{l#v#|G*IEG?W) zvvAaqg^V}f(cS%WmsdmgAHr8J64i@KfO}?0wC-!b2xbi-TTa~op^qIHrwWod{fWbc z95f#G7&q&siU~QFm?g{rWD;A~vL8^t5o>G@@t}PCpa~aD1o+6Zh)cWmEc5@7Ki`oj z;>DJLrEfbh%#8$#`gk?o&WnFMy?K{2XyqQq4-i1gvLKi1Mk^!hHZe5oa5&3-g-bew z={>jT6lR>e$e1NoRbi54*Y%dQI_ypkM@b8R21hd;) z69J?A_4N`Xv>(vMzNTA#GAp+oMr#cMfyrtI+6N$J0}0SjvPEpYDPt_e6D$zup)blX zu#kBHk8&FTb|68qSuu$}B|`E;fD>PlTyL+}#jm&*($M(;nL@H}Jfu_S{mt7uD3WJC zu0v)B!`ESX+!>tES3DF`-yQnkx+ILkQ|A~$(a#{M6L!hl$0*0l^c%W#x2gP&L{#qB zh3Q~=m;?ykb7lCk4=#DAOYaXt3PH#0Twix|#|n$fFRbkVd! zD6E^u&*3~ZJQ3YN#3BI-MS06vz}9pvHPNEA&SN0~_+F_RcEz)Z-vzxKFrmuT<4k71 z*2l-iId%$OkGB39N%Mp&8JvBd#{ggxOw8SHI=#a<1~ItyGv{oV`#(81kZ;6<9;Js~ zSnNL>=&1h_4+P}Zhm&1$kpPKiYN!V3q#FbvB$EnTg+mAIA8F$7$hzQaO^vv8Pn3kd}BK{#qb66fj$FQJS{CPM^{%BWo2b|5&@LqMUtqiY)YW68$c2+{6}WGn?zCPItBO)iNH^f zm4p~ea};I=@PPp>2SUP2-~$>S5xG|Y|MU-Nd^&{(Wx@eHD-Vf8I+6wg=Ph74@446- zR^0x!qhH3I#1p8wZ0+qYFC32}!2}_V?OV=F#4Rq~2K*!P>@_c*My9XK=-%bD#2KB? zWBn{((+2~(>}Z$r2Cgo+ko&6@jGLjohB9d%Z$8A9@NXl zo_H@+JrzY!9<<7OHEMWd-RAo9H{o3Z$ht6F@ zVv2|y7+7cxkS!-dZWixx98tis4^)WL8#+z9&}U~x2Y8K>v=R*yfqfdWKV-knoUx(g z9L;(!L-NaEw2Hjc`_m)%yOc8Iitoq@r_hRT|H^?6YZTa;nm-Z0apmrfE5+`OWTauU zei)Z8&nUh;lvucCpwzT03EcWi;F&hR zV=#ax@;NyN;v!k3_AJmYl+0)kJ2G(VWVg4rD%2`>E*sGyfp~7O9$Po#2^@tN+o&P! zvIn!_#A$?Rzy@LsSw?ZDg^}ezV1xDriDU-D3b?-kKHJ5?ol>g+33nDT*H*wZGY3^! 
z?J9qtHyD-}!5_H0KfU*5H%-*W4rUUPcu;!PB@^+LYOvUjms~F2Cp>7igp~vn5lB79 zyi)x#KRn#^BG<_fU+7BMvk)ywxP_@a=H@oQ|6Kz}FCeE_`h*MOc5lEifeyTw35uCO zutaQm;w;r5j1hGiw;s%vtp|9C;pu5A5)+`sBznWm25goAmsqXxE$|S&vG*l;V1N1S zn9LT_Bf((kuRkSr-PQigYl&TW&>=|Pm47AfVY8!IVmlXQv9%oad&tm>`(BYW$>HPSo;nVi z@6BZRKAK?;Npl!;v~M!89-}B3kNuVI@w0e1Vb8`1C@2#(qdeE}wt$S^!QWz)D6dIX@Ig%d99X4cfajDb8@O)`0&K=u zbIvBz1*Qe{t_eyAu7On?E>re$Hfx{Ie9pX(v<&X~*HP=q>!W{)ba7?O(QccnfvCE@ zTpDOyph%VT{Y#jc3g;S+CKJn&$K?a1xA-|R`fq!xjWBP#j|9XSsERJj!R6lFrcgD4 z_B5#UC_ZZ*%VT9oy_ik7GVQv3@vHABC@r!g&J9UV+Ec}19_Q5qS^0K?KPrz;2t-3U z#AOH3fVyD?g0ndDFT14-7yg`>eb7{wp=A%do6S0PAYo`(#Q)BZ^hM4@^~|V-g!*dg zSZ;s}!j1Zy4`Y%iDkNB>ye+lUMfQT4C_DijCSY~rl2b#r(tKfc%VX5zV4m8piyz6q zy+`aas_*^%cIt=`c7?x79?VK@>2(+(U~e9!B@)!w`&POadlYq#klsW3S7G^@CI_!) zQA$M&v6S!d(O@mK<*x^v$eulg9Wl7^#dc=5UG*pu7EtCk;4A*8t* zb(NOvzbA{Y<$i0XakUwvF#9+3-Z2Vk-Sn*)>()olx8feyi?y{M`!;LEZxj!QnzhdN zsaLMwxi1}*nlDPnH=};kGWTfjvV_=nDufAn%oOY47SmByr=p5IN`58c_KqTJ*CL`% zUpM)H_X02Fd&t+D=JKN5U*SUDMbd(r>rT)>JRtzDv+4iZ#Tj>XKA; zI4FGEy`56i;=Zj!i9&uA-KKo~3W%%Fla;n$0_8QEtxKtD`V0bqqn$NdK=uN5L1BZ& zjlCmP|1XN3PkWap@IF7*`0;ow@7w#yP3!rC#y`h;n+8(TM|p`lqC2>kmq znD+bvJ>>-(ALfyM*|(y;cmBBi4(91EH(R!?$7hKFXOhp$ClZeyy#_|=&tb3|_wL;z zd9?$021L_iU~6fDKbE*d?B$vNeI)b2d_)v`69>MJp$*IWQHn>&jd%M?oc@w(K=HLq zCad1%&37iVsGH(>`6SdGrOY2PT%Ex%O1JVAgtF_{dhbOn0w(Ief7eKUY$fVDYicir zrwgj6sj1cavlSUo5xr2oKr|Pw7lRAN1(pluFV%qOE$nV2Dl8nH7`%KcaWj-rL!8rV z*jQ=N(D>CY)>D+G$Q#=1Jw>L4N1#Jydhr_ye0gEh<9w39R=-uOW3!(-*oL%WGvnL- z;D)~$myi%(Ljuwh^#By<1(e3NV7i9Q%*;mumWSZl*`gun&GxgU&Rq%E!*x;IkIuw` z?c-WSiBE4Fc|}=X;03`hruHV%yZyR4S3#*O>-3lqL##QpmO;Gac!h@JN4^#FH79?XaMm6O6Bwxez=7~Ro-LO9BIqR7oive>@Tgu3{So>W#2hoPxuuFJCC2y`2C z!+9K?ng!LOS}4~pj^U@dz;;!$VP}3p3_fpOEvPNKXCR`<8YV9nX6<6!AvYl}88JLu z!#(&-CrcviBKcL=s;ilXfO|)AM7`-4P!h(hKM}Ss%ysJrL!oOgVnd1mLpmTP_$QQcy67?gL5!2UZl^d$4Jm0Rw$D#!PmY%XF3E5c9B znWd!Xa!TEeiB`~B;4mNboU&;{A+T!w&jX%EhD4Ug9kE-y48G1kZfGwDtsl)a{_f1% zyl^r;=!xN82#+-!is~+#-Z9Hz1{LadWq21+QF~3`Zw6(;mdJm(A8zICFA7DhXKN^ zv&{$U{~5GpmVuYgb{pO5k2w|+*Vti<4i;JM8{%&kcN)c;zdE3N^1&{v(GPobvTfN& z*=sbBy7ret6RO&nH`bfP0fo}QpN*@lS?rt*xIfxId(a$bZi(}VXR2C1{j~76=D`0B zE*OGSQoVakW?SCh+R>51f5BfqWM}X^e(K85Z&3~!OKQ6HtxFR(!t}czY4t4@(UXD1 zBI2+a5T|TmuzJTk^N3)qOSa`(V;$na5-*8SxqH}geo7M=8R?0G;M#N$4II2)RAsWP z)2q57gBo{+R%S`3il)edv~(&D(}?+z=*(|x7%hPN(}^6*O7MTNfM)nLsGCyP)}}0c zL~c-(>rn4Y&ee%Qb_H)l}}~jMQO%x zFpZ}ot3VGuPWG#9YKw;fdNCd@Q3P?DGpoV&t0mr*2o34Bvww6`t+Wlr#a-6+{@%UA zyh!N|PNuncAtm_uw&j8{^>*xnxSMy)K&h2kdipUYty!*}mQByi)@#&=b9vtikSvRy z{i>8&2t7bv_%%OEIai2;%aOG4zgK62E@8ATyd9}|e60eF!4@1KW%@^0<`-5I``2a~ z$D3O0=E0~s5Z(K?vmUZ(t`X8QxTfsk0Rd!6e!T^=-@kVT8}+ziGaJ3{x%!^2GV@A9)7`Zs)~iX4rHA_c3fb z<}GK6rZ41UxAh6RJcSSlsb>c6FsHt`69WZ@CO1lKV58Qpf^}b zF!n+Ee|S++hnB?G*AacgnJ(udqZ7_p^Su*0$&n{KShOx_2Cmg)gY`2}c;%Qz6*@x!3wvFBgx6 zbT1oCh47a7+3uZu%phbjmDYYuc&t^W@yD}6V}2c<<(r+->cg6IivRGv@|R!OIJ~>~ z9UAV5LQL}MrXPf%Psd^Nw0B#&#BkNWPJZA+VuM9jCy^|y z($({#X$EtTPJfg?-MXcb2DPSns(HTx{K#{0X2I{Y+GI9MpA+1uc9*|Y9do!^XwpgG z_B!Mt+l)DOuJo;QgH=_%VY3zCj1CqW{-Vq=g~wa&@E=V<&;yOp9;v-#2{1(NL3?{= zUPWm^)hVJ$k!^Xe)@Tl0O{b!!`{B^BMZNds{hNWIGfUB0+5Gz#jqx4OrK}5xKNz=g z#9N+@FmsO%eTPq&U#y=uv!#jxzA1kmsSB9+UFQbdD6RHf95<)wqhtkf>%C&wY-!KZ z;osQKcqza((nC;(vCylKo~tjVeCQ9X5{^XOn*>5v1l!)>xEw% zOmy|IqJApbcp0>5yZ?(_p{y)ef#XScw8O(k$2V5}v#qdmFEydjkl0LFIH!BW-7H!< zJ=K}L2WNBxzj_-P;C4E`cN_{eH&R(~Vw}MqG^*x> zQeMd1gLNs%92O*ghteW?wFN-%cTe0#H~KIpy*7N~J|~I7+Q}X0_}>6o)C0tx29vnU zEjLGnNFR^q?9PiAH{Zqp8|z_~-R48i%+A@s`yMK`%@6-nfC}p_J*$ruRV; z27SZhk6-NrXnJ~cy~%7Tm<-gXm%_J0cD^dZLZuzm;Nq*!mUYK=_m`b5!OwfgfCK7*E)%rgwJGIRreRCvL9Tx 
zFJqfjMe*0z+^w7ojF+AXC1ds=Y`s~#E!H{ABTR=c_hoPDLm4i;wUKh^e-V*>(*d*3 zJi+F1&U*dFWBmu$T-(9-StY*s@2He#RrRLEf?O%fW0nfx5hda@S?vSQ;YDC@nx?aK zH)T9k?7N@zl$)mP-K9I1?tFTP5xx7TIUR{awKbtnWtp4WK8=UfiA`MoVSF;snlsTf z_vn1h*Q2W>*r8sJ8L??15qHteEe*~j?(xeVjI#d25q*~1*$b{8mUJI0xvM8dC&GgkG51$;v{`*9~is^5EaXX1lrp6`$?W4tW=Md+=K;dkw`bKYF zpSH)7mEEk8G&la#-m9cpJv!5{fzgUi;m`ot6h)!|*J1ni`%vz?>rHz=kj8;fDN|6k zDoFCZj8%x@gIMA`URc@rSf=4cbe^3%d``O3q5DtW0uuZ?N%-z>)cBOIm^)qIhe_TM z7Nb;IZ)lv~*S7IB!yG-P;Z2A5Z&L z!%VT}7>o*NrtZVsuy9X9hi(t%@{Jm?P(&w2CCF-Xy;Ygt==PG&;*T3l;u^W{j(cq{ zUK}fru^S6D$XG4S-o$nc4oKu}HJBejJ!v{KHFD_W=4T@UA@v2*{Y%9O5kC*7ZaPT3 zPpPOk)Yb-nPgV@VQ2Na# z_-p8hP+yiQEKVGjO6SYw=|Q&ldeHe!MEBv<)mImm{aAyMkjnaynLp+Mdwk%(I}T z@RN&^C*Bp)jUwFb%d3K?gumt&p7fGYDXCBVe&ZS@$SiG_ z6GTJhWt|J&uwFBzNrl(WGxZ#nCuLYB$ME^*O*mq`Aiu7ZbLN>Mi(HCce|KR3?UW-t zlSTH0K9kOld?bp8$z5bolwBg%;NVLZ0q#T|2e-w&{GS%!I#A^Wr5p<#->ge&Oz8Z``Ky|~ zcsxBnBjGI4f}0vT|4ian{pzXPCvNpXHGLfJ5vnaZ80XO>%5NBS3DpvMRMO>n9v(^t z&a=U#Z?{W}3!r|`xJycMJU?4?AL1eJeH3>!r}rJ90qO_mpkKQIb}rJW%7}YmQl_b} zfme`~mTmJa!Y@KU%-Wmx#TmmRe7>Wa50GrxCd=vjkpKIS zFf}i&b^rQrxPYY~>&4gH(dc-uZ>`{LNE0q+CK^M4(;c zqYr&n<>QGO7*xwdcJuX%0!1%uh*$C3w5tMbHwzJZZfJ>5I4`j8bqS4cbyn}4nl_&w zpMMJhvX>R^`EbbmOO_F*O&1%us;lhJX^_$hjxBRA0Vm`>Rv|64Imhjo#iKT|5$6yk z!*gCtZ;R6Ie1RqVaG;_sf35%6u$<19HC6Z6!hvnVS`T8i=}p|xa+x;%F3CI>0vlgG zsxaLZuG;LcC_aBe%e-9P$nm%=`hPwj34WiBd{zW5Hpu1jdJLX4zKSx$d8i&efJY>6otf=X zaZl~nEHk?2aK{{H;N?)Kf3A-0|74DLe9--rf;?e|x6K6_;v}QH?PQyMaA(3;6E4^5 zu&E5<)C+@ny7 z`^URl+oj9~@wnkW9vC)>@s*$8MR-zonAeY#Y%>1+c;LVP-kUdjt_~JE<1;3-LuG75 zb@19421miA5iZ6#`Lg2I9TCW3Tbix9kbMB1wk4tY6NRjvJUAuxdG5KC+?2$devrxxe5g2s8MrqGqq+ufB zc+xG$Y;7*yL!{Pt&tIaxrg+b5AI76CB0=6oZVYe2-HYG<@RJt9(;VmBGQB%(PyOJ} zH&5MTvv=btx?GJOvx#iQMW%FJTY_4dc*gFzxty!45+zQbiL znXv>@uaNu&IbX@EL)Z6v|Kic?9F&6OgAqflW)YYAJ^DGh2gV)ByXOucEAdx+J}Ps( zT1Kk&nu0vduvV^9M)1tblXZw^E}YMuLmA@HXg_}}U6yX~EU=y*x}@Ia2G)_Q@QC#B zGc)t1RmKOmwo`YgkX`%<}n{Q-sH=!k9z%cXQ9uZ5>fW+WO97>4BA{hm~`? z3o_IEA1V!L4eh-|Cw$Ym2jUjGcFO}H%kq!9;xoqW`o(K0_N;;n>^fTe6Yin-@k52J z-)fT0y0&r)aUJ8cw^&jmoR%I3JMC3pMz*{2D%R&b6)R@tX<+c2@{=nKe~jqGL*aM z+mg;~mMje5hws7NiJsdo`d!Wgp&xq;8U2r!;a`8yl?;s;mjHbqNUDTC;Q zU%Oy5f@1Iqbq?9wi>*noTre&>N1I^-9Oti(V{AE(8~gEHFN5@HlBdyME^14rnnYd} zo41WZ{>4~6Wq+ui-HR!M`vfMXo4A0@!Ll$zy{`uuxdS}NrJvyzW4F2!#^<5j)Q4V~ zbj!|PSviapaxK-2!CItpjWSg118G*K(DN771e7Rb(RqjAZi4pdKFQoi9 z7}vIO(-0giDGkPE5YYP4W%J&%weW@(Rfjb<;XiMcoLx+H5-$66-@nb7kuy7QhIM{q_@w8>b&~5J=rgppjOgZr zx4txgbzl3_{@3CWWCdDduJ=l42>@w%_q7E5U$ zyh+wwDIB4h6xK6uIC7v&9XDh68^JHMI#@aPpPte-dCdkKfBn1Z-{Q{dvHpbg-TU6& z?9J9{^hj2z-O`nim#buH5mSsk4WCs4$WA&#-m3Ts_B`>FH1b;@fgg9jv=QIW`lp?M zb2+Dz$`o5UV`pu-Xj|DbzO@NjGfwmT>QbxTn{bU`hV_eg#x)5%{kPMk-a2~A=N6r2 zECf|<^!#<)a$;p|5kBx&8x7b+hMFAgF_w(yUvP}cJX&i z_TDpXVHbWDk&aA{9NWGWY0yb}cKymri)hQPGx?Vy*|8C2>96mF+$ds7KsNL--%l~4 zc4bL}zS5(Y8O#ig?xxLJ;i&Q&EY>YgzEtv9Rr8(7L&IZOY20=JY^Q-KsO?Nd&`gl1^ITUFPkE3{wzHrzI9w&>&rcb3x~SB z<4JXgS&W5@^Rs4JIsP*nWir|KLIm6cAxt$Eq#Vq;remt3``6X855m?irj>2)GNH;1 zj)Zyl&QxD(4AW(Kn1$GWV_sU+)9iCMP8A(otfuJtu|~ULc4^@L!M6?E&fw2!)$g70eZTPYET`2986G`0ibrM(#b!Ti?yu82^euy64jAfM^unuy+}ZP8 z*?2jV31WH2DK?i(B{!d#2MuhDmixZ&E#sP74z~E(TAXk|OyLwiDpN2{|g!z&>-9&q!t6zhjP^0_OWy;4Sc8uZmZfg^ZcJ(Rb? 
zxpd?3cA_!dSd_a{yn|J}DNlcM@y_LXF+_Tat_G^xP(An4_ESvkEr|;j7~keBU-mWb z%iXWX4eJT84#mQ1zg*p_BHyurYjT0X&OliAd~w{f63_fhx&{pm>(+(>mWr$39r;4->`jWZCH+b>CX5eQ&ivha+FA zWqMrKdr^r?w{Iup6Ub9qQOfZq-2FHm_2H1cM*LHJT|u?sKi3Tkr89OCg}cG5OOY20 zkLWAcgxCYwlZL27j?#d(cz62j`#}1L)yu9D=UxAU&XVhE&-7kh?{%=A@=tm0%GomW zH>lCMJLJTm#+~Hr%MoS98b+i1M%}XF6!hfrnHirYrQ}cwm`enx9XvbUr|?~1IXH?=VIVKm-ODpj^yY}IW ziOO65c7|_Qy#4zNehKl6HS*t&2KVH`%l@dZTv)*jnHxY$& zCFn>qV}KeFBmJG;uKWI%ud5Jx{(3T|f(!elw~9>hX`5ErhIus2-=G5~qEfclCtAz- zwY7C+Y`$x8M!%;kS)mI~(J8(9r~3k3RNf-pOZ0&jA|C&`5WNikSWpp;9&*cl~5u_yAk8}E( z{9Y`FXma{JH$9LqcJ~h~I?LZqV9O!kJwEt1$_F^0^yJ1a1m+%{d((4&BanPdrL0E4icH;NHi# zEhO0@w^><9NK}%^Zx2aK-x?_P8_RHhMywL~--}P9uQ&65QaaJmMFN#?-v9q^gdiS+ z-R98AIPow1>x|INf4OSEsqb<$IDReIu*wW_gI^GCCj6_r$6+kACUE>*YpkUij)>>( zL|vrJZz{|l^B(4BeQ1%!b57=Y#x{?qLT-QkM&8tx1h#%dR(xvR=@>@$o{~A(>@E|e zpC9#T9wc9@&a`*rI+mRNGM3l)r=VJd*%zZ<>#*#g+E^x1)_&lFvc-4KxPt0}>VF#d zHO{)tyUcgR`KPI+9rWN=ujBkek+M)z7FsqG#;uMZUU`sx7Xh_x{>9I_4}&K_Q{S##9C;}vp_58{N?w6a$3wPlsiZ{6?+{(0-w zQ_FS1zwA1KgiFxx_A&@}9i`H3&qV*ZMwA}P(!&N`T2_Z7SfJ6B3Bd(Av^4|>}VJ~zm@-&5H?cVl9?2+f_%AO*UP)) z6CEh?@*sAHB*#i-!u_b?rkbo1;rBXh(RX@5aK1iRozKcedQEI_W(CV;T-)n_cKJ1$ z%Mpl5-=Cne`g`^f51p|Rha*q&lu`Nk{=<{DACf6mzGh{mN(Q|lL(qLHFbaXAwCyJxTi!XZU=yILYs#`z5OpeJ$z|+7fepVidz@g=mB}Vr? zm&RD*UVc>TL{ZmntlAeGxT}K>aoc67%@nzPH zm#vl}`sadpq8t~;;gsTwl)T?L-H+eJY@&|G^MwW(YYHvrC$&r7l527}Cwb)C_g^cM zB}-l~H-?jN(YkHl&iWqQ2j5fHj+E(QmQ~@DZC4iDdHwDOF65kf_N3MJN#UNn%Y$Cr z!IErxc491K^0QR*vP2Fa6prx35;uL+hcym|DC0-yPpiE7hjX|b`_mDHh|{XHWtWJ9 z$D#fqLAKhGef+at><^y~^e%(LF07bUx%hzx$0_yMTDF?N@7O;v2hcYf4`#njugc;) zI;DG!yZArsX>984Yv9=A;~)%~->gr>n&zV3&8}eD+s#q6E5!!TEMe3?)ff5I%miYC z0*yY3E1 zA6WVC9Ivh2Jnd|%_{SmG*uCEA|I$qxnxK1q+Mz$y7!LdKd~31+IXz}wtPe^uKANyjJV~Cei zS+n*1PGM-G12=!ZhPyPRD;=MJQ2tXBgQd`dk13Cgr*XbV74{z;6DORN3b1+gJJLgS zJRLW;m^U_V>c9$Us8nGZR|E%|>(wX1Tpv9MOuAUHWeHsw+oXgL)VIkD7_tr?`;TeN zxiwFgoRUZn^P7@iWxs@@ZZ$&nNX?hB4?at=3J)?e>MEH%9+pCN(f3>zc&u(hRwOsB zT;_~S9MgC0rgUdO3 zer*wK2fq)MW6w$tD^thbH?tp}$}C5VbZQUWPI_X{=;i9-?Oo7!Xl4~1{?y#atP@RX z_l1SB_Lmt)hmbeC0LES_zKyz-zH=)bf95aRjn3x;Tww$2d; z!<1#|!6S5k$!W*soCQl4G$n&a<-_J8apxU&9iBPD4A*gHk*{3KI#O3r#m7ZgQH)Q8 z7454&{Lx7v4kl~ml{>f@{LnYX*vvW2s2SQ;Hb~!({&GKia!K-)!9r%XeWBAEGpP#K zIeY6ldlChk0Meg<{?C_x-PWtT*BU zfA(@~V+S>W*Vm8{4C5AqrElVdbf3LEQ1LvwREYjZ{nagxI}hg6b$cHbz&bkmwB2F! 
z221L7O^AfY9JW>`YIM&iS`?2Tbnajw- zKjoJg*}2k6piSu57{aXn-oCx-A0IOPm1rJ0GGVm1knGm|) zNq%phADx<>aQsMB&^_6TPlEbkU{7YT*0ug@!gx^k?JvA?H{!Di1s};FqM8{suwhAX zfJT$k=J$F6k@!-q+%Qxzz0g45E#gdn%naJ>zC#iRA6}d+OL07NIz}U!-zHgxD!<&l zp`xfRQuHLhBx7nJsdmoQ72E#0TI@?+N#S(hZR}-6(F8^+UF0cthbO79frCo70?GkO z9}MQQ^{8}z-Ppd=C5+ZOa#pXapWEM3lsWw!!pioMjIqNaG}S7-OPdc=TGcGltd%MF z_}jJNXIRM(`4}dA#Kp5q-+$*|Y^EM)Gjc>{WMqa~OGQ zvdY+ZwFEp|#~lf~_L&f_IKRAeV%s@7E26<4bk4*w~>31qrEQcbax-Ql>k-aU~_8q&f6)cfu0N4U=RCix*Y3I6YpToSr# zJ$6=EE1;Ace}C}8XQsm|6r6GXZ~e^)he?|D73hdNGK8TAWXd4|e_WKULdny1{jC)} zs3{7!(q%5E`7QS|m$ru=$Xo4wHDMS@n;CV%*Dkc6MCDA_7B&RF?B=RJ@pegrWXl_$ zqvrPU*KwuaDBezm73BMqU@sPid6EsIe`ZLA+(1V`e-7>FyxKV$`aKr=WY9)sRq%D< z+6ASGmwO-HvK<9(1#Hk7&XA9tJ;R7@Hnf@Rtlq>4;6C!)w(p8ymkgV0fJ-w!NEQk|7syhRbzT+Vj-b9?CN3Ah}6f;aoWY!GhNrvC2L>*pSJkoE4W(x zj|>M&PI~%eyP}Chv|#Y*wkWh2&*(oep5!J!gJV0 z^BfeMoKE<|UjDhxRWO|_({(J1lV{=9QStKhyb^V}3qKSteJwVBXpa8J`!l~?ni}*h zO)$mf;K@@$rE3dW+5>DG%X!F6_FzZjF^^zjxbc5`3vZQ%r>CDKj@#&;^bgzyT}#$w z+!o)d@hwT6mo(!TY;GOOsE7l?|7tg#*u$D$&)w?(gSB#t^oc-rb0Dkx09 z3(zspbuBH4GWc$h_w}(3kCSzNX>`vWeI`^B37+R6Cige+9Pw3(k&o&t_Q9gG8UyRW|VfH}CHl4Y)eWIn%0_Gs=Q8)XKWYNbqRSfz#%9ftlutx;homhQDTzWVNIJZpS= zke&3{=IDEA6MfXp%rEq#JI^kpE&t1u=IlN4z9f%F*~plW>pw%RA}@y~AoE{aY|$nt z1Mcj~YwfDYkg5yLtlBCPp>?vljn|1;9|t7ekX4mxk4qsPh`OFMu-F?#ck@A?o3s-G z2^pI}-IAk2e!kz+Q>)116ZP(t6I71p&-%SM3Ml=0>|>2TOmh`f^Z6MM-$iak7`Lkt znfe@&6E+=TdHRFierT}T+mY`&ela{d5plXl=#$OA5`oo8I;t~ICL>CsW~1KS@^A^phIeTtCoAgOGm1(wRTWpt^3q8`B<>qT zGpS<3xU>7ZO(Dxz-$GHhX_b}4-TH(ET#O~&QN5Ng3{dH+KX+|+cpSR8=1M5x*bN=) zzStk%WgVkB3Ypivl3#QY8Q(VZhNlqH%+&qc8}bf9w#3cp@tBk8>EE2TlK{%vQbw;H zc!E7NCdT;6((a9HE%uLTj>`aa&)z@Vv8!%)Ix_e7*x~r}RD}9l*6|o1Nr6 z4EAso0~dqToo9d6uT_AF7^kEnDQg}vXHW>h{HTD5F3WAAr(K*Pmca`pQeOgACS<{l z(EqxP0SgmM!Ryd*MOh+a>-_+$<>?6rm*Z`%@2$6~4`;$81F^nL3N3p&N=FaB^k~I* zX#ETv{<>24!eicW&PL+7Cy=x$m(TRGt<4^ipr-8qdyfZVt+AHGu`RxDPWENl{9;z_Hs(2U5)V51uj!$P6?NsyxCSD*BGbzTTk=r zr%CFrp$*cm|du`j1vf7!iC~>f(oaIJwx+iMiX6^f`#MdXJBi)IcZa5AKl8F zjArv48SkEi8DSzgs3gH5vbY?NZIj*%G}e!az{a1o16Bu@9;6BM4}2^{rl3cRz+G;EmL5cI`_JbRVACS zWdso)Rd8vIpM}*JFrKEz9m@eY*r|x`ZKJ?CBmbvt zS8r8@ho^(%IRL?jSP^uo&4&y$uYH7LN)Grbc&s~C*ormF*RVx+6Mnj&l#UGyJ!QGG zxmCt)*-Wqpi6eSywv$R;1<$OzEoMu|*q*F=hZruC^TZd1LVRr&4UM?`t9QwP(I1-A zlsXx_mgqw;$nUL@uoLaImr*j}GU8=tS5U@eJRB-JrsMYffcxbIS$Hf%$KDn{qTv#Y zqG34$Pj2RCy--An2(P&hB3~F@ylyn30v zvtQj~30C3c={+!Su zpkvY$sMg+kZaRE#hc7KqcktKFba4NgG0?&V%E9p9Tyh)6ab%cC{F3$Q-0u!`Pggf z?Q+6S$>Y$mJHgj7y8ur;)!8VJz;&OIy7~6)dzWR(U5D-6JaZ=7*J`sGk?G}q_xEX# z<=G%u zmno9v%W4-1qeSG^kz{n^tuDzzY)INdptMW9wRc@3USeWN{_@;G{reet7kEYsaB=de`eUMmm_GBd$#|=xQYDsUrJ}txU?6oQ~qH zEjwvZIso~=4>$UMcEho~$M!{zt_w%ay`sczoV(CPF%Br#qBD+)@wLh653H>cXis~) zneLTXw|MNCxrn=)3jT)vCJ?}!>ZS@MQmJWjsX!(t6aK@fv9nvsWkU?)me;--Dnjp7 z(B8k04_Xq5Do;y!y<&jlPM-p`At5mDSd((Bsk>X)@Hl;E%I4_?N8@s8*(eX+3)NsVfjP-f;p05Mg(wsq zMbd{|_1MjaJvGgR!cNoonwh$={UgYUWXXbt@97rn!7Up4nBeL$0_&F@oa24EAR`YC zPzON2smO1$?|r9jmmxlC^lqL4y5niyR}nliD)_tH8lEHc%qz_ajr5G7)P2+B>AkRW z-q6Yg=xNj>nnL85W%A~u&8N~|7I}I6FJ5AUmf#G_x{&te5(X^Vv?Z9$o7q-ma+4i- z(DM8JJKYbDOBcxLefivS;J1`S)kyT%`tcdOSpw30)~BmdQ6_3(inhzZ2fTFBL9j1g z%Qxb<4G>PEoALOwli?Y5j^yjR5tU;DA2;~?{koZd{y`$~6Wtv)#bUo9xWSAF{y1gfx+n?~T@2n!RhMOws z^V4Ud5CgZzA9OsHkH4HE{AWLmm7HWwy4UIfj#6}G(8k#7To>tg-l)K`?-uj+6s3Rh zg|B+NqIyTvGYdC(4wxf?0=5g3RMp>Nf_hsk_w$uW;BPtU)5T-gX!d%XW$ zExvCzs`4R=661*XjDy62-~EYIeZzS|MpyX5>Bg2p%B#x|iTJB|Q~FlY`-hk~M4t{Y zzP6S0>3#NqvNR05{`k{No0&DW&I@VOQ2o*)gLux!@HdtuF$3E5&q42&VjNm}XWf7d zmhfKGqeJIX^vi>W104-zfGpL~Ra9gDr}PuS1+fL?mHC4vcTbN_F2FYvn>1v$JDHQtIvJFym6q0607eshkMT^-EvCL z*Nn@Fb$3_{vB6^6UgAZ%MmA5*sQE0PRzfV*4)c@E9fj)2wJPiQ 
z%{p=pviQ*vDT@Mmz)*0}M6%P5&8Xzre%jTOsI$OUQYk>(OMtDhturhI;}H)NS}=02 zz7)jABQ>d@9N_p0U~jNZ5%FVxy?Kz-$qvlz_MZbcQEc^uS#Ang3&F^MteDF*L3!27 zpziJwI!jSy0e*o%_^{OhN4j!?_Iu=$M_7*BbOU#ER<@t?OhV^pXyWLY-}!*H-x?%& z4pTr&s-CXKz&#O`cja?_1z#r-H3n-Mo?u!8Y@$;Z=#Ki&+a&6n_n*&;>GzMTVA%L1 zgr{dKA^W8>)8Oss#r3rqh(He{7?hQlQpxZ4N1dPUPzWky35T6*$o(KoYE5+`0oe6fr7En4m44fbx**)s|@;90Z}gLqM!BPWki0wW^IH!K>AyNFQcCNgKM zFnjsg&KSMp*uUY`0B0iT13)Vk!gx)BH#mDA9m06aG{8vTk!L@vT(Ws<_2dB=S!ka( z?<+u+h9-8UATP*MEe2+|GnHv3czgyB4)XRqp5(2mmDg3r%t@JAY zTxUAcz4zO<75%%WWVg^jyBUjE`3mzb%=N-Vo~yvRJW5XNh}g8qb#iKI$1iGkcp%N2 zi@Am#RSmf}3oDdRCN|d#$s?+fZPj6k4Ke;v^eHr(ZB))s40e#~%#-{1)Hf*wYz9H9Sg-7S-)t+1FGcP->JLMiM}CBoA4Wo=}pHP4bVI>hKg}vWGcs zu@|QOPo-e|MXHt+p|C4rO>y zG~{0Zq%a$}QlP~#EM|r-u1RQY+uysz=Xp-uqGUPaOnYca4*CCgy(PHv)QJpv|3FtbNiVe{B*%v z;HvM%?fYZ2GQ#%=u*c;Luq(yk^ZL&|1&Nj9zz@uRpT!2-sMr44u^}$^N>y6 zT9AzUf$3^9h^0{X>CkwHrgZK^BxN=ZV7w7VEz~{Odg0Rc5*gA!MNI4E&_vw6yf=Db%R_ zu&b5Wp9=IjCAAf(WhJyG`;cPkl<^L4RKjBg8S6{Pb$^vrc73zh6|s0n6p@`xM>$(V z?0;~mz)sUqP&5DD{ne)YcPsmtkKcg&_qztFEogXEp>Dux!4W2x3A3)EDYMgoehBj8(GU5<3blkTX12Q@1LeGty@mmYuV}KCz{!P#Puhsp6 z-of}X#SXzjS#A_+4QKDuef9(h7u}K~i3yxiTQoSVVn8nf_@un5XPv6L7c*0f zPSACbeK2vqX9)_J%6yfFZ$2CPXi0`S94+d#_}7o5iLw1qVOMN(DVZf3Dizb!XV=F!>*Z+)aFxA7$_6L^X>Dfj~1N4&2I|$ayBwI|V73(c|?gzYg+0s_IEbgROoA zn+L7h7-IYoAVk~!^!d6~^(BLrYL~^xP`&<2aX;ob?_x6F*MV`RPdjbFFrx<2QA6dX zI*_8$V!Z8yVn^O+9;&p8Jm7TAT^7n1JgraHZ~@6Jcc5`7a5__t*%(4M_ls$3C+DAa z?_QJ|tQB}w@WT3U^rAd+ppt(}JdAfbC^0}0+01QWFdrVC5x(GW*`>q*uDw1R>q#%K zv|tV74Z22w^FV37P=H>qd}q4W3z?IwE3Yx`ZCF)^L_+`tdM0CoBdJt%Iwq_MsSb_<oJy(v3;Mz zpBcKarH$46>=@6cl@e#l7Bzie*R|`7F)0oOhMQusHt*Iig^Idtj*qOZO;KGy zu0{wCCO$Ix$vhmy$Z=(tALqY!oOiBMZl53&FumTtUv@0~A#tycQ@(A7yxL$9|1D5b zNA32!M$dIUVXD#|p#7SU6p4+lz5o3vzc^seL#?2jEJivro8&j6N`4;ewj~@JN-Uaj~#c zHQ&;f^zGAkJtU%EE4pTsrv%gfves8Q@=f8u_lf`6yMtZ-G{#kG(A46u6DhOfZhu;+2H=v7Sn|yx{MDRw}71f z=qy>ExW3Dk#(mYlXMLV*&Y-@1p5)|>uSF#;`keU#y?`%J#|swrY_>W>XkMojFvx=%3>E5JotqoVmzC^bCg z>f1JJsE@U|>pm=T5|>$}iOg^?*3XFh*#vqx+?I(p&Z}`Esb>DlVM>$yLyIALYL)NON9Lrk}r<{fyH-*9*5)wk3p(&Ccr)^i7iVFPj zC)1P1+jDl{Kr=yX%x`677=Z!MGF(K};9e^-EzK-Hoe~4JU=s09FpBjnZ~N#{+RT}B z_0G$g7376yL{>(Iym-cIQ&T)GtrvJW63(S;q)O2->OqlD-Y$3A$)xc4Td;?2Z*$@) zIOCgh3rD#=k2m#Xj8)8i403dKrQ6oV34R(bZyARW%0{vUX+Mu4I z@-o}1E}%;xkR#}q_|D(5vyz6PVbbN!!>Py1G&bc5F}G19?kv_1xo7mUnA`aQ4xs+)fU@4{;$K7<60n|NgBZll751f`(ng?-|YDZ*Kv)4Xr_Gi1Vk;!~kMhcL{v@jkq zkCDi+WXcKAXeGRo6bJ!U&BTP_@MdCxAqDTekLtZ_)#k$_hkNcW+$kf#Qx$uPxoa!_ zC5RCK`yWg39tD4%i#-8roaEIcD6jwCizUxA1+P*cVlqZ`2+*nrt-{+a$bbL>^=oNv zVOu>vA7T7}e`025o014({!kPUVqG^23}6IOGXR1+D;qm3JY0?3qbnrqJCah#ATrC)aPiGlYH1FU*Z z0ZnN#v6wtS4~e8+-AyRnKns`VKjcv0SUN#l+2Jj>cRRUycx(-*f9)j>31Tv8JXCwz zAE5cIiY*$TdzBLm`PfXXfn~Ak1d?t(#2s`=ZYRts6BW%=0B3@h!%gs+2wsy&KVu4) zad%}(&P1?KWXSVV1q>$nGBQtd4Cr{<+TJdEF5-h=&(|VJd#+R#@915ljhMJp82i><4G-;RPi+hm$3PV(G*w)rE4MRb5T+MGB#& zH3l8VEr$QWGOa$a>2AB`Exp84me19+ey6Nc+I<2ZukT&|GPZK}l0jXR_lOQpU&_Dx zcPgYhO6XyYo=^sHibNG zqQXk)6KB95YCOhkMTFRx##V^Z~r9_!$-7vf9ZGj@wthx}Nq+vHm1iR6$K`w|91v zAzdkF0Eg&;e@9T`qc;9HWU%H0Am{*=qeQ@q7F5$eaPxES`jhw6hhSpbLt~WKVb$dT z8$=gh(LFND0NAM9d$SxkvbAlj{*rAzxO@`u5@diOgn87~`qk_I^9lTn#8kux$4tih zMb>;l31~^aCc%yW3krM*{dB2)5D3KPDzE2iX6$M%Zt83fTtI^S0)jmJ!aRZk+5-II zF9gN;UvdFo`1$9S`*r_E1ACa6wFUftZ%~YK+XWg>|LZ{uX5s2#>}(G5@bKWZcCd9Z bHFh-Tg*jWM9e`?_1n$^fh)Du} z5SnVJz;JH={>pDJO$P1|zf(1I#la!p{`(s@Wc*$_4o(n`8cg26Yj(%@PWsQ@DTm9l z_?y1(ec!(AHv|h5f745I3_Yu0L<< z*E!w}$@1cyUAv|nspQ$N$6u};A6%)^nceo^c<Lt>8U0=JELX2u%5c`I zQ0VIMGZX7U!%MWR-Au`ni^b9y^Ft{*kE`*kNAt$Fs{hgM<(b^M8s|R8GU6po%U*OL zT5q;Os5v?VBVukcb{Km)UrBva-e;M~yZ-T7IxzksSrEK#^Cx8`OiyytQ`mkA{KP1(Pd*xF?%{_jad 
zQ+oB%!Gbn||KH0AJWkb~^_e|dI*#CMPSbDwdU}PG& ze?9&q?;791ct|T?XXIul&+o?h<#N;BorilMK2I8K=J zKF6}ZBM=VGFSChKxT-E8JuJoju(z)XfaHXS=n3d867+u8LjB z?35*it39I%%t%!&A^otNXExu-`YiA@7&3TsBP2a_}8BXU&L)c!_Xo?uvoi{($+j{_aigAteSvZ5rq*YS6{Vhwti zYdMlZ6xZ%KDPjJ%JFJHzWWySf@ye>TK6_BmvH-gkbzLC%p?%#Ymr)3?~!z{iTp z>>?BT`fgr*Vic<8?REwQ@q!y=oHzC`H<8kf!>M)I!KPj*V_uJLSul;D=46mBH zz;gdvxBw^lsO$)i3;GGprMi!>=C?%ONY!$n7&(^{h&u6ZZzKb2DpX4{MJFE44| zhUwAzx$IDMQxg~85zgu+$G`=)SmoWK;g(9jbqEz~l=}rp28u4j#<;(3qy~zq37t)! zx5$ylUFrN}LorY^hn+u`4Pwg*6x({Q0&hE#E%dhxGe6ZS!Pc%{XJ_9$yy!BN57T2I z2%J<^4}ThBT^}IO+Ucu5^A^j*eIA)SfY5JM)}PV7h@?=z)E}cDuEE}Tl)NABLacHg zp?(3+MJQj6gyo(S?EQ?a$1QBS_Uz16p#F^LCh<~{RrdC2 z5Zsoxv8SlPVNiCSIYa2k(yF=scOyS4dis1}@%mDw;k3Xy&~%P*c#;y6-q1pcZWx-K zTc0+O-rX@6o7L7g`1k)$>MJN$YLSh+mnw%Nrng+XwX@f~Hv!oXfr#GFew_38A2a|# z{Wd5t1kPiS5BdLSvD!^Uj$ye+74{c9fSZZgs&~;+rE>CrfB1WEAPSEh6Td8pFfy7f zU$4zQb*nh9EIRpi^X%&B4}-p&zhVD6{AwfwzCh%m?cuOzEfAe@b6VeGrb$axDAMF# z?I2v=f^QS_YI1$JZvAz5eWOls*px?wjnxLn=S6Amb-~O)$VNW)rls*p=jGwzUdJsl zW75>?axBf|^{=Vx3?rir-OHv*(XG3`01Xr-bbMfWY16YDC1n3j)OKuBtKunEzI6pD zNqrC3eXMAdR>%$hsJfJi-%{U_#!vVl zGDT`0_qxUwQ|q6+8sV<~owbUypNcYdyio92TicN8{7mBO1^k6m)-J<>Q;9`^CI-d4 z$oo$@Vs~T9sbBKnw3aI|FruIEbD;X-9gCd{ea3S@jOHzWaG*pQ z8u{-?$zO7Jy#A-#cMqvwei=y6T;Sf_&k8Ah?_lk!GCHjMrM)AF#0p1`Mk`Aap*lM? zJ6u9+cIh3E`|pzm7O{-u)56?`Ss{hUVQW{GdQ4(T1^$FZz?5Ee#Sne+l!u7Jh)M6+ z{eQ;e!8hAIt+J@Gj#;GArdk=F?OoO}!GC@dBr(am6W7)_)%x(X?yJ!MNu)~7x6bf* zZoT>ROlt~J=>A3}ar#lr(gS$Y|7k37?wy&GXExk4t9ANO^wNWb-pKt&|J)>rR?(Px zgq{P{PrG>%ZrY4L!ToUyHrM~;^ zX?q=)jupCv5gQwSoz-Kh{?wBV?WUUb6MMh=-J^AwfQW-*?MP&zL^HPL)8+t2UQuaY zKxVG@I*rfKkZAeS%4ZXm&v`RN9Kb_4qG=C#O*9A7cuPrXMZUMXW37+Yhw1~aeGV3` z?i|@{Pm~LJ?7RR=dGWdLFXVuQ9bvU?htVE$96|-&oscqNyQw1E$qMg)4!~3t#M*W- zk*p#kM5zX`sF}7IvAUM&M~Ee!Zv{z-^(}GNuH9N&C+B<>A`E`N<$Vr%9^Ki(Cnj*7 ztGA(~m-azL$HbJsf1hyg-o4FVUqb8crg>&tTshV@HpWkPf7%SDa@RZj6j>tEYq1;x z8@0HwaT~Xy9&#J8^0}^)Wb&IuKr+pGW6?aut?*Dn(#`XYyho#&2`|{wNC*{`R24W? 
zEo_iBzg701m#;Y_&0LvVyM164K$B?L)`b2~%3PCg^({l!8E}C)T%tMG5owj$_EYf5 z-h9@xS`#e}o!p|8zWDJvv%a#TB3QyeK>@Rb+hzg;1xb+f*vW5elfqyy1=-njv(3(h zILs+xy%kR%+YBVJ)xTR($P{&A7|NB-D$&W))-TaSM^SQ&g%ML)&ow&>+0XES#opnZ zY>h`|3fU-($Xq^|_gwThX!mg+sU-!RS~Yu=sDMDVVAN|@!?t4{=RA?fLfLCVw{&PI zED4tnZC(~T+hC^;Xz0#>BGuv7@cVA3JBAYO+f*s3DS9?GUC3e9KvOMGHZYgRqz!VDEAFaHt`1|N$Uy1OK@xq> z_WtW18Y+&BC6Q-8cogP>ZLBt}pta+a=W>2UKInb4eosN%3^2KnH%Kd;FIO2!$JD?2 z9j$mLV`sWH4-tF1IT{LhvNFA-N0G#h>?WjZF##2UXdPF5)X3E|9n3OmvwY-C>c(l!L0C2wBpqi-bDiY}tC3u0E@FWn z9sa(y<0YgG4nLp4bpBci%atW;ryd6u2908T_ga1)Vf)|k>5;9m8HzNteFRnhPrUm8 z>Q>-{kxG7C3|v%U2fj-ss>&*|gab*6Bx8J_>DLTA!3si5X)~J8~CUPXq8`x?fdp*B|&00 zk7EXi=&u+2ku>c_0RQY2ZxoTm22W0pj~a}Xzb9^!Rn=nf8TXw!f()1eh!yl18xaSN z7E*+y^qXy+xLfd1m*cae9>UvwgndpAF3a7oORY6Wrmcs%+1=hfz1x~FnapE(2Ab(C zkS5*!BfuMb_J{EJ!jF33E_C9}^hr<5qdRCR--o&b$!^QtQJ;O(uI4{^#v|1C=2{fV z)ivE&6o{Flh}JGnuy~xo?=AJTokIQduBTU%23TWC)3jBR9Bsdd z1k||Bk{Cxz%>3hnMGQ^`fgCGEarf)QPlw?fURrFSUsS2n;Ss-Xd~L^m^2#ds|C}mm z>DtyEZkuwmeR+oC+?Fsu9$ED+dt_Vwaj(d6n_e>UKtLy75+j1hG>(;MO>)1DWl9}S zfQ`H$IXqkij{xv9dFJu)@tzA+P1l&s)rgbjz^WY7_&g*z<`FN*+~sg}z_cAOxI8;< zo2cI(zPd_UU3Il5zQ;REkdo#s;S(L3OXeg{$4PP=zPa94(UbY01s^PTB4Qqk9Rw_o zPYglX+Il~)eHND+co<;2xJd(EymU7bZQ-Oh8RwUy!(Zhe43n}vdUUu8Qs1e zM3##Dab$+7!}@@mf^gFs+Nw)QCEg}k_Lo!cP8SoeRZ+{kMs#SM-=F=SamdDuZN-2mD@S8==5^@7GD?&_Q)p63+R^p$IGe7bh>7F4KB>+F)^AZo z6Njg3Y2A+R`@020#|im&nv%i7-Fx;4YUCoeu)LlJEQM~s27roG8!OW)S) zf!6Wp(hq(WdeQ+iAGYBV&--7c;+okjK>JmOIVS#l^4n88hM5cd?&^69DD~-v(&Q|O zXQpOs+V(qgD?V+@=Rf^u5DUGLyOmRiK!5e4rC=Lic|(YmANA_lYFMKv2jlWDmGKfLZ_gHtLl=C?AD-Bp zNDa}yN^@BkDqNu*iZa8Y4RQ8U^sRV#`F6xf?qlgA7KJE+jnS*W)#D+q{v1L*L~2_f zkIANG&JSN8>M99X6}{M43@m-x}H`nNfuoP18gcVB=Xy zrkb)sChxh2$nl@8M0Br4Qy0AWJ;t74gFlrW*u{_iI^`WpN<6Wi{+bFFkByVMS4GyF z${8osiMvaIp#xs85?n>1dOxg;wWO(dn^a^G;w&!IEYHe6PpU_g2_gz}0jwOs8JzDG zI*kS(EA*2_8c#4vZsy1CAm@+nckQeNX-m2T>&%(NfiPx@ViLAwxh6;Btzk*=YMQ^6 zIH16&8ZEGGYkHGyE+jc-WAe!!XvRoxTf^vdoJjl1t8=y{Qit74^*O*}b&WFwROPWc zQ>~FkQ~ue??%o;CkmV|O(;7Tt#qj#3nhMRZUk|P*&%Ax`Gi&mMG7l}eog(hbExW+j z0O3KX`vMt_TAI#g?M2zky}poiNGbx(W7G!a>!JU<7SL^E2|*H;ZH-El$a1_yOH*_) zNQ(Jh)EW6eH&|n5l`s*-s<*S}I3|YBG&QyW3M{EkprB?E&gVdtYBjV8k`5Fl-fETT z_AQf83fM3dyV>e%CttX4_HI47*zEt>T!@TVb=I(QwcpcR%aR;zw9&FIQ-1QY>YmMTr_zvjMMUs70g2v1^}E(3!;*Lp~$I^ zhb!s1OJ&El=A$Ow6X%zbzF^X9jh>19Xn!t{=matcP$6WSbV_6SBq%uL@3Me=$64A; zQEWe7(!i7vMrPpRbG)ftZPWzX3X)@$f2dg6oM=K6n8-tf6Rd>PsIBD%(g&$Q(!f=} z(ZcKt?!t7dxv9e`#+j&Ozl$i9&E5>Ui)QmU7TrG8Zhn$1KcZR43a&HBn>ey!yICUp zboe45wh;#6YqT&7tEVZyL#_^8ep5lq8O*oa{=si5c!PRA>t5RUO;l)+(6rTT;p`9-uN*GNlt{6dbs^fp@>FK$zZ2ZZ+m#rl(Mvn0{8~+yu++{kQt$l11dIji(*B6Zt0@k z>TTI|W3Tp2iI)IV^d(JJKd!3~{;#&`>SyBurJ5^#D#H&2+9{szga>u?K0UA4QL)WT z#B5`PZ?|b%*y~Jy+ChU@$zL6kpjj127e8MY#;*Jco96ZL*FMPwG0Cze`e%VP4V z)>cjFaqu&6#>D#qwz&OM(9=qfNHWhq24NM%e@rS}@GVPu3ptbhu@gAJD>;A|NxxWV zuq*wJRi*+;FhGw9t}chDf#8!QOTZ!4o|HBNGHy+4n10k|6(9x?zpg42Vov(Qo{ zFM?5_Nbp*6e8Iq@S@7?)xa@AV=vkYQa+Ea zaSPYyf3*m(zOAKwnP&SevK1K?uW4Ey1W99947&8FDHi`&N{C>kRF6m&^Qq^Hg)@__ zJ%}?fjTG%Kk^$5cP^`FR4Or;t~d(Q{APbng07kQg=y zveJ*DuV9T^shcA3PSmR(V>*y5yY9M{JlL|6pu~rI8hVqE&LNxqt6sOo`N^%l%t->< zXSM4b={L~gm?D9(_IJm>*p2G!wW$-$6$akjJDYQjSGBC+Kz-loHBv7k#|#Xit9G8$ zmFTfQ>8o!Fi2ey>BPTNX+30`=icaijC1NeM5t-|Eb?M=2aR( z*;I=I!%oaLr7QFM^L5QD$*Tt*t@x_NVR7n+u@I?<3sY_`65`eQDxC?kox`=kg2k`C zGn#i4ZK1XMqAWqb)D&To$48~PK3wl|UVF2>SRy&&)qA-lIQRTnKrK*-1uu3fMon*w zlu5jMKKN7bfjXmhYt`C(tD$rpoFShqz6PEvMyNy&C`u_wswP2Kh0$56_0^4owhF=9 zW3zH)i11gULoahRSFRT%pdSX8Khqy@+bcvq6h?A1^3+5dEZRnOO@ zW{D+j(wH?x`Rr@m*Xnxr71K<oj6usKL9_xj(rBt@2szCe!NCy=sv;kbn>yd4I{oFA?S>Wh`4A(7)7Neu3~aZEem|X7)YVR%^dJ 
zen>z%stZ$7BrXoX&}Kyd?KYn0HCe2vQtccN49rSvtN5N))bTs5Qyawz^`<`G?mYpj z+0GzT*ZVG+G5+81-`$k!wD%Q>*_g}D78!jf9kk9d-vV}j;Jc`%5Vdvj-R|{AOD%qPMm02Xo*~=k*wtWGl$<&DKD~U0 z<;??iUP}q~_RUYhc4Kj#_cmu+q!&PapZB&!I<)vJjV?qmRlgk)67A^Nya}9P69;9= zbUm|u#oSk`bu|C+*sRA$@Hlc2t;$olA1C%ILK5!#bvy635y~u!mCEd9=*N4ERCL#5 zIE_1!Qht(A;gvE<&aHl$oa>cOIN4gOOjLfX1m(Z8w@M1#4_5S@8V~Uv=cX8dD-4Xq z02WX%(9^48LH*PsX6t7c*@p14-I!kZjuwRDeY6@W;M6ZA0*P8+%;g0r_`B+xfCj)V z!erU?57Q(Wvv0c0T{$|6Z~{Z@OkL=UJkBUq=49nC5X@!fJ$jjUD!{a_>E*N*2=n!; zg0ZYGM@mXc)LG)Reuonk8-cfk>$+yp+?!1BG*#YP2QCvVx2WiIKlUj0F;OOq@dmqT z7y(({lCWmS)U606)JIW>EbP%B^>NWg7nv+oK$+}01FPs;0dUk^LYUl1$t^cI=NyZe z%+B@MBH2&sJ9rUxgqN$yI-L3k9ipo1eZ71~B$kCG^S6pO&|}T` zyaC8e;bD_xzw)YRV9RZ;majiyS$-Sn=ibZaz#yU5Ko1FTBf?idkU`9IqOeVd(eHOK zRMLsA5=_J#RWcSM{?>f#+ai-f22D^hhws*^c7@kIG-qapb4h=yM_yM&Hubr-A%2Rk zN?^Bc*HZD#2@)Z}b3oCj7~|J{@SQo1@1hWSxpB!He<{4&?5T}EEBTxy{v(!nCwGk{ zPO`L9Nm9r@@7eP%T9oXr7AEhI&>`=Fkel8hYr$`w$7kh-Z@J=1nzg~=RK3k8lxw~v zOh%?7P6oOl7b$2t=BgvJvpM0Z_QlfZqf72o)Ju+}*oko3oP14tq!jAUeGp9)T)|P- z)_c%RK_ElCpQt2z<68!|-dX;)!vzL$m#~@eINaA9R$#b&#oI@aKQLHE&4BjW76WbtY7)aed7k4RP zapc=KD*ZJ?_mbFN-joT2_xh!Xha z`4-BE3IDMC5iD$r@Fn&DTBXkZtek}BNqsQXOYjw(NA3MLA~r;=)Wf~IZFiHPyTxfH zpnWERn%6#%_!!q-Vb5r|NoRmD4jI{p2k#h?=2ir^B8Bt+@a)~GLZ_^KIZ2$R%bIIr zS0pJVUVD%g<`OBWzwiaiA?W^&M}au|iE3f=SxmW~ozwG@$~BaG-3#m{N8HVXl*>E| zWLlizB)JzSRfvokX^Qbd$I5J%RN?DSlKOW$*C>OnF5lbrzQmKXvh8;EF=i~W?bc<9 z^VT;hof~k6I*r+(BoBn+;8TV88j`vLjc?sN&R(^?^O81}%~25@wiQc$JP}TyZ7~*? z{BpDRpuz3@v!+kMv#X26^aY>mlWCjf?t|Q9D&@n^+zOo2@a6r}-Sb4^HD)v5rzar^ z5Eo9*Agt&vBm{p9!jk6V&o=dvdCU9g;gAv)i8zNAu>}fH+vEOSpcngUNYO?a>Krh( zTC)dNAjV@Q-GHhk%UlpjyT8~(HL;@jCI(}8JD??=g%s)uj}urm@e3W_z9_VbgoATp z!dNZ-&>J#5c^VY#6`mwl#c^jKX|xZpWDp6IWUK0U8uzT~xaHkah!+_FJ*~(4C>4;W zw~02nIyG}Q`XqRhCE$4=*)Nj_#yu-)qRnI-8JgoyK*#npn}k1`4V9oTWGrhV2aUUO ziWZ8a_tw|;Aws)p556=F+~cZfHc+?v5ZNPZ(XQ?rXhD1X0Go=tjL4bn8GI~?fovnn zw+n;mrveV4g~*#N+eKxXMazs(m)2PwhO?3CSc$!!`je_ArB&xNWiW;N9PyY~HDI?z zsv>@;ZS3rAt^RP41getB%9MUqyp;dHsvWgH!QRnNchklvlMR`9OFy^dfISf1`yq0v zPUHP660N$e*^X30rL-!RcIlk&jJvvT;(#`K1s%FOpV5JoO(qoSDFh@9r%z#^ZHX6x zG%4^0LL4YaQkM$JBaEaYhe(qG)B>Ch2N513HnUmZz}b9&hD-?3EF2zD(-i}Q9(;x@ z;vYl=+Ydl(+rmKQ#c#wO)Q|?To(t$jHiWA(vXR=ETfC-rz|ek9x;t}zw9#X?vlsE! 
z2H(WYp-jjhW}sa===v@ijydeSIvfl>dyjm$&jjWoEiwumqWF0XmIwCy^uc$xrO|C!lhaPWm+qRFh_qzzfgRfq)19L{jPA2iZ@dFd*F z=`VEZa~HgQU?=PSY65oovs=_*H(pHru=zENj&`)8g_J;!?ku`&<)^fOuYz zH`kC3L)ute;gQgo*kd`mTE-0dX)^qA_jKiPLQ&T$Z7)m~`W~8K3Chb`>VJ6}J;Vk; zJ!iNab0H<>$0-Zy`of46)vLdtnjr|xUgFYE9{?a_tq^6tpCkrTpT{@FU*;hlFHZl^ zpbnKMR2-y=f){A7`8Efj2`~uw-fY9502;Om2brU|ss7Lxp^{1#za0W~fifFvh7@T> zPzm(8^V^bUf)g~lq**&T)VTMICBs08i>hIVEgQM_S#q1>*Y+bo>Jx>m@ygRG%I-7+ zG5W=v`D@(#f$L&%@Z5aQ(;-2Q_{ytQUG_V#4aFUdcV3?neSJXz$lNH^71edgxQ1nj=;hkq9 zq}DYjOmwO>E+2W8BvrF)?JV@ToqIi;Z}viPEpX>Yd*17=IgR#w5Pg;PpViTpvN!lC z-D?d|b)mx-Vtqkk!$i!;>H9(D5}IgiWXbmg9(VdmN)C~=cMR&vQbcShg!=BJ>e?5@ zyFCy(ZRjHy2g-GaQavGieJD$b4TZ1BjDoyFXogrvyJsp!dMQIsvRqh$ahZ$vf?3&W zq)8GbX!v85p>@iF$1f6z*hFv!&d4W2-|9#C={T|{Qpn;^*j0w=QdWg;u|BVNtz`WI_!$k4@l0;Xq5f}GC^?i%|MxG%!0yE(9dqYP%6KJ2 z#HS$>4g!YQ@lkd-0^!paCy@|kFCFD6xhE9D$rSA`h}lRwkJhuC2oVJV91%*9e@giH-lyj1j%fJWAx{z6S6lV9<24B3k_OvbW3XDrL>a)Fj+*Mm@i87*^`Q!Vp@gi4Q11j8zaSM(c@dQk}BN>{yoJ4vg*PD zhRnsl4BKF{NS8e z-My{}rKw}ihOZM=qvskODj^vzXJ;XIj*OX5)K^)C5EP3t$)oaT2~HngHq|i87Qa&5 zVnId4!6PztO^<*y`vJeJUaY2lP7H6Er!&-bB5Q62^cW3`&V-LsioL3ZC)>mwhv{`j zDQ9ZRh8e&vun^BMmI7;!`Clxa^(rhVNUhJfjst#wViXo++w5e)I*x#cbYu0!2C!_U zlQ|Gn(5r`#k(b&hDucop)dxjXX#27E3{U;sB5JaKyv$&IrT*WA=$NVir|2-9;qp&0 zmZZ=ay7;(jEIloN77*m8jXz^YI~j&F&U5@-O~blX?%CUmP{DKIMW4PX8N3Y1VxC>& z*^KB$WbwFf>iAs_&+l;>edWtt`pij22TN+C971^BRY_8&P3~LwT6M*y37xZT<>)wQ zDf@7|>Gu1?S{q-RKB>-~=pC(>PuyPMXcx;OX6t$i$t>hR&6L~Lb6U-}x+^UD9;FVU z5;K@oi^T!2h61Tn%60a3nE`LJb6E3^^2W|g{o5n_|2y_zn;q8V1 zGdiIdg{8wy&@sRFIg|t$hu}oU-zLt;%(~xY3g;C;3|QAN%&>+EEd7D}o_SJqrU+&< zbb_QEUO#{B|8x%9KNx*j_^mneSEO*(?=PW2S2eGW5S@q8{sum>LC)humbb|xES*2C#Z_V-ajo7>Yy>c73*k`nLHZYIgCJ@4MOQN-l~1>d3LP8Qv)e0s)DS zKbI^0Ns-6`xy9pTKNckMpa4}lT~*S6orL-IlMvm(PnN~n zea}EGToj41s7R0y{RyXdRG$-i4OtP zIWrvz3*19!yELJsKL?+{3kNF&wRB+bHrzum_B1Y}Q$DsQhVJDCHLo*)%n#9MnIHFK zshV4@#3p@y?S(A_b<7oa_%ciS(`aQ#1p`jX|E9I({clxr^tXWlfS9>4l=nzzjdWSW z`;hhECgje2Shvg2tT6KkgxLf^4h$sq2t$Yk5Z0_jOhLB?G6gZ;H+#VY4AQ=EV1OgE z{6mj?AYV&7l)01?EM*Q4f}2Xuwe#3`3sRFrFmS{O+FPDP?bHq`W?1U>eH!b0b=39H z1xI#ONM258`C`pAWFn;syZ-Z4Xc?{fu*_(wk2)+#pRA@IVHrI&1NB+r zYcG>Rbds?2i$(1k662%QD_h4m!pC3A@!eJmJrOeJpW`)P&ljhEGf?!e^n@BXOJs2& ztBbKJ8=o47`AwELyNE8#$@rv#6f0ZB1Mo8!$Y$FiYgEC9V^0nOwi3DP-|pgpMIG(Q z(Wn~}QK<4Ey1vRllPm;o3g(f-LwEZEWTm*~2oRZMzhV?@O&iJA!-&u22H#_AU;lita&AFEgE7DjapiQ#jrKufi=)X(OyJBkp1WkZb?Q1j-= zLC8@JY6yl;n11g3HY3YkO!r5Fx%}=`j?9wA#vs8L$;Ot>nL2;3D5q*xp*p2Z#ze}o zwBft@vyvr~#ciBU)~g5Zvoe+Z|5<4lyGD+07RD4CIPbC)V4wSazQrM0jRfHAD{h5Y zio7W1+1Sa~AH>ER)DR@oYF&~TGFp{R_J?4s%nwqM$+tw@)C=DaF8E^9w+GCF3{uVS z_#t-|RnH9{yWO6=}4(b-UyYr6)j0NWTnq-Q0ld%hpU01i-{QM#RJS_*hVu`U{p} zW9FEbm&kFBHis^kx{ib6(+nAUhn-cE8CX@uoMaR3ZstkjOm&XrmOBP~%Vqpz9=94= z(d-vp!G%t!N8U;9LXkdpI4(#hb7Jx9`{1h0bR1fb+ka`hqW(mcB zBRG*x+Oi(;M&Pezqk7B7o>0uyCGmg+r$(<*wCx|bVaIgIF`oBCFAp_s$)iPq_M1B` zbQVH!zIP`_NWeRVt0r5g!X0^vFx%%_-c%&J5PP(_ry^>5bV^w>7gVvx8(TqU-#bc) zIX-sdUu(BJ<58dPXFG073bT5a;;aU^HqW%We)#1KluLiSG3$CpVcp)l9maNTjlwa$iBax zqQul6*XIkPzNaY{xWIVg>BJSXM4dp}V%jRu$Op9QTAAsT>Z()n0Tko2>B|vUrhhq6 z`~AWw83jWcboW`?|Kd8qCG2o5%{Hc9o-ub$>8_AMOQ54-UhHygQ`jLXDAg*~KZA$L zg3|cYlaf!n)vk0)KmK{2`tl45mvdVZsoNBqlK$m9myM%bEiDzxISw8*3_9MJ;fNU+ z87bvA>q8K&m1t$bgebv_rq`zA=#hwGWrFH^?m?^KnibymYum4L4b;D zOs>)|6cWaKA?7D0)E0>#72!5&%zsw>GTOS(3FDgpR7}$7Y`(X6^MSY5xvyUXqfM|X z4%$FvkoNZguStIVA}56%hZ5+>@WlG^xD_S&BaO@Ya_6B;PgeXe`=mtQ@3KNxq;W`; zStGymF}j^`cT-(i@&a1OO}hU(+e$RwWbkig%^egA>`7dP(b1{SN-&#Ul^f`B6LzLwKa>0jn(xvPrpcw>VyIBPur3x2uDbyJVF6O2!5 z8vZ~;hgQh{;ur^A^}IHBV>;!_o1zDx{I#5|P&V#;&F2kmY&^IYy~MOGnZf-1UdA&QD9*>VgStSsXB1i=Bbp 
zM8)y&2UPkiCa?7YWf~>egu=835Jn4zHz@hUYsLCDL40VuDi>wx8fiB7yrQU#gv#Z* z(C^~=hm@G?D0c$1RNGsEr>|O6r{kU6Aly2pQ{se_eaPN!enFJj_JEn4tkHP_VLg@C zL6q;SPq;$`Nh#+XkI?`>>Ucv66)X1pS14I@z1+)27z=9r6%z^)9!D!0tqm=NLF;rk zh9&opuYCR{cm)VqaReZjM5pSF#8%H6fvwQ9u^74Y(Om;v1x}IGiP9Xwg#y1$M^TYg zl%>`<(VBScvwGHHcVs(d<6oR?Y*M9r8A;I>KW6mjJ*oAcieT$)F1ThsC_;x1T3Z%2XusHI8ZNM{^-AlrUPk1 z4{kJg+gHyU%mDO@fhSo5xkQKsWfKv0XdZQDv@lsQFCXsPNaV10eLwThp^kQ~hPS?u zf_TS+V!Nz~~kVDf}pVYoH)*zWL^Wp=4vfrLX;D2uQSV@8T-hs15pB8SFaYbMLzq*S95# zSU>N*SMleU82yVm_0@c;FqcEDDMr$NN8@;)r^{_62ags@_sOecm|toe8f8YQ(LEY@ zag3IezIey-Tcn!yCZzIjW+j z+5J7jfL0afb|S}QYuztj)}x_+HKx4tn-KjiXLV3MgMUf*;KzyJIkM*OuF=CX$INS@ zE)mz;KA$BX-94(p;O`Y9Glw(rU-~%K=DCyHByc*Ti|B>bIY8jo1sL%?63VPpWbCN> zJCvBw!ZI`x-e@x(0T!}`fNIU|woY$+0|ZF?b?j9mg|4chz&KP|>GoW*xmnv>%3&br zCFz_0FnK>-_<8L9u_T)kZ$#MS6tXQh55$T+OX?#NZQJU)5XGbZaXMPmirel8kDVZ3 zFktlhg)1+x=y#+=VF-+1Q53+|HRx8JS{{<=<0Xz*jFt6~cMchmEtokM|{7Ug0t?j_wKjuXX%yLH~~66fpz*k*DK_9ii(w z^figSU2f)2AI*v%anH(3mL?_h?^#H++LWCd;`+P1Q;)^cRck+&Y#JVwaP}KY7rPi( zxoc-KwZwMu=0xLojM5We(S1l2{dZVryXz^p)6RF&$#oHq2R6hSv_fWcI2B$SBhg%oug+J=C`9_bZzosv;Q zrSVvu{~n!^EcQw{_+!*D=IcJFwEw>*E(MS}q$2ff;0gtZ8B!AJ^@O(@ zwADO^l(%Fb%6v2vng1^HDQQ~L2FM1}En zeZOiWA$HC&2l6Easnepv2N>DLbzAbXsM0|7{7$q!_qDA8nY+UXi@p z%9Udr{FFjs0T>cKqEKi0)Et=jhr}i%$s)&$-1(J*GF{#oW#5Kd_W@078AabwOkRO& zOm4x)`*}rz_jB{dkMi>Mj~Gge{pUvf`@)j`>M?dT@~Hx+e+aP!0k@N&gX3c?$Nbn~ zubU{*+Sl?>fL)ej@S%foH+zo-Wk_jKEEEUt2=prcLxN>rdA;$+QUO>gqELR~z&H+Q ze|voi6U)uMq|Qk{qNdNghSF!95Ty+Hg{fOj6v%7MrMU~{d=+6vRjm#EwO~ZjcS_87 z-w}YJ8S()sSm;q4ak1{iXiaKUtfhqahYcmG*0jTTOGqJ2P(An-XKiIm`@x= zWGa!y!Flq8HG%Q0L8}dp!x`4g3K{rx)9bY~P?u&8t7p0+FScyu={D@T`N`e=gvi}d z-E}WmkV;@@DTy^Sj`NfBKREQ6?_}xe0vUH1JsEdteR+53UCE^VoP?*HK5vrI(C7*h zY+eJX^M66g7h|n6C@JX;R7n7d%}Q)uELpKQ>+Xen+iOaRg`d3>CD>Ad&5HvqwU5L6 zeyzZm4D9_R_vYP;30)mJGV5;VPG!2YxR$ij*p5h#_|`0sxV9J%WOGa-S!eRI<9D(W z#AW|?D(=gBLSRo(Ow4Bf$j;zX=+rPRQ$d17)?MNhI&#dngIf4h_jLW4;Fi%A*V>eL zYF3r&6MhIlQ((v}qfm*HVj$P;^HutbU<=%cl?QQRj*K20Yh=}GEsD1A;a^~sbeV+3 zgED0&sg!Kd{PDF)-9o|!0Jhn6ay1PN4c(T$-Z3@%dugzD&4lyYV^Ex?nq4y`7hW|& z=x&~U@Z^3z>3!v~IDHy}*1FenY#gq0gY{<5D0i>jE><_c0x8v6#JJt+cs@jGkFK0T zC2)d2@CBdqH}DIsgud7t&+iqEP2)umJ>eswPNf`-stX)oj=^W2JRG>7(BG|$Z~lGU zb%L+)ZLGex)N>;rim(qQ%1}{WU?+7h!eumWT(-KZeYloC*VG*9b(eqGC;3ljf2puiNdL1GTzg{&cZt%JlMd;}`St_a*1GS-`jP5>qWX3x3-k z74G}H%U~}hhFx-X&SGkAi-d*Sdd0CA>o@lQuJSk3N7>YpBe`>IZ<4VY41#wS&;tb*E+lA4y@J)*d&!!j$bay z+xg7NQ@)(zDXFBD@s*Um{;RcVt9?KJ$8N=FkWa_f_H6u|_SwRgG^bl8d^^Rdifb)MvsVLJrjhTH`4t zD)t$RNeMfAkSSbIs7yBvAI)?`iJqK%24mvyr+tt6FzII5^V7M;we9Dago@_#TCDqB zEvJ`Sn$JbOPCb0MqDm^~nNdERN|j?V01=`{|9?KJq0~jaNrkTGYNuAK&^6pr%G)W3 zha<6z3M|OVGJpvPpfeNmU!Wdecv=4T&VM65m;-m@%Zq1)JvnyYf3r`PoyXi3{y(a| zGA_!deOmz3Q*&$UU_mFKtmtu|GO*9$)Iu-9WiZw@KW8yD=ix&D>S zD{O^;*YlRlQ%!B2JpPp({L6UNJX!Un$XfTBR%iPZX)d@RSh*xeA-eZ0lvzum^-2^M43 zC#Gykh9s9btgT~r*9PMgVq?Xhye!Wy(I`U1Jd!8|<#m8UQm{niWAoZBXlK9v^lq|1 zfs9hnnOP}IBm>0)JwHEpULSf;rQ#PE8OfGg!jt(#g*kTiDpROTZFJgh>||Ank`&Y4 zV#a-l%JtPOYKEob+nH_%xT~95Lo;v5A9}YjectEWpvGEJs{8si#Yj4TwCCwBDHM10 z0}dIYRI3cV=F9MKL57eU0!4fUC|S7aFe!>vD*Zfp0#Tq~(HB9%W71;}$Y*4Lf|q0f zyLJ&5TX8x+GObbp+n}s5EXfe1M>fl8&aS zAnJd?%End@7e_4UdCW{Lq9+DOrNEp6-fPV-ECvTqUxTCgxo~r$kt|UZeNjrINa=y# z!JGc52Smf4eJ)i&(th&;nE@e#)AhxPrkdKy2@C2SV{2Hvc$KWW=!a#>i|qSo>D(Yq zxZhqR$m>x@TIaQ+R(iUif83SlW?3vZD(v`J_(T=YbN6|b^hC5k4Dr%FJ_QlCJGDEEUtH z(a|A{)oXoQlR}v#;8Yky!Q-$hNt7rV=rl0)XtEs!<2?Be7`fYHIRPQQCKZg+2A}o! 
zZ9Ffwih6hifvrhTCLJX&%>;>J0$U4@08M|@>4PG|JFkz4hPRKirJvF^%3PLRN6(xt zmc$OGiEzEn^W=?EW{Oem%U`E9o1?3r5Gt57Q|_=3CF)fh`H8X$D6Khmdk(L32TuShgZXmD?bS1|u?xY*)&Lc} z9%&kJ=5)SsS<^SAOWB{LS+fc|B`|*9e+~w01_rECvYuh5HY`;8#|==%a5V}k51vfL z$_E1vr=mESpUImsc*3T4&874jSqm5VNtZAl|+LktgNiL!d|B#EWZ{O^u(FC zxwz5SFdoJXf}~-1F$3RX1QmajbR;F?4L-G?q~znvKYI$op3X(TzPE%9Foe*{GK|43 zG!DrmiOna;*J^JI-}L;*eSsQqC|?nlDS#N;L${)m-0ol%&?WksyxxnNf5N|e_Nb`W zlxJdNL-@Qj%m_O8v%on1OX?@H;w#N2In((`2iL~|JcWe2xhaM~WmRNw55k-eJpDwp}ugYgVsd}lKW#D~y zS6uts?CdPD%~zr!eLn1Uc5Wy|UXq)|Kte+RU@7q$lM)lq$3uNN<51*mzRMsB3q`l& zg!|!uP7!~G0}0)-*L(XwYJ!C^V8kWwkOw_7CC2!mK-ed?r2R zw_MY3IzR~t9j%Tcep_qM=Be54>r<0SEY$ta2$YG207D5wV=>CgBUtir#$rOZDnTsi z-ZpjtFPk0eHQ@@i;NV1~tKTvHjfc2nvy{m(04QxW2E^%hFDvyZW(Ol9`Okjk%aKS) z0@~E}bVa%>b|8yw{#}F&qi&t;14*WJIDc*eU3|y9UAt0+ZUgCB0?LAq{{$IBr>-!t zg^K5vhAcg}Cnb+Fkf9St?-Yc)HJw++yTs(PeJ)S2r>OGLyYBPn#)T;RYeJ|A^Y5F2 zTs)|S2m!1+)+1Td&HMbWF8osi^dU@q>qf$$R32yKuXrcMMzSaV?@xh)md1rz*&JXx zlRPmT-F9a=!4w9*4B-N{>q#6A=@+slrj3!)iu4{GxqS3HD5gH37={S0!YKTD?B2hv zx1LfjFX>6(3&|H@#dbWO*0sY%2)G{@Ie4R-H_3GQ9|C}gL?U0G&lw|}j~$n8KW$g6 zhz)l?G^sIg1S$zoDACq?!bFg>ZVgd|-ZeW|7KcE;eR?%27Fa9Mk3X4bMqs&=BV1=Y zp%XKNf#*YIci@!+IN4sZk9lN;XnetK1&jt+A;SGDh*JaO*en!@73E^N_VELuKmW?(wh%SX|3&>c+w^WY~H>m#ZJ zE6?M@y2`=-dd55=lFU!J%>hd zAJg0^dCO51M6l^N82(tPQiMa@GWa-9VAoXp4)=~1%VUSuA^QxuL5^Wm9mqJ@pg1|< zoP=}6!)ApUc9Z5-w=9F(T%ntf2i7(@^UwkvJ34L~7*guG-duTjo_=>Dx$qjG2$MVx zt>!lS8nX1{$rFWHf!Z1m<3pg*0iXzfmr-Ns**Y72LNEa)iF~RER!durIHmwI=K~Rs zp|_mWBF^MTDkj3F9Dvo*5~An2y*9BcjZV^2AJN~1;8~a=yTz$*DoP?s*!bZ-)9BKkZ^{JC}_uhLWzGo8g})j zd$gwJ9>J|*?!2Q_?t!;Dypl0JDZ|lHXs^ut=Ft9mSi$oToJms#ocD zpHoFnn?_kAm>rZ3&&k@)q?!Q)e#~t2#qWr--O83%-_^|Khh=mGbF92M$Aw^ z-*GCk^mm9#v+YNKOwp91<&@{MpYdhBmwTEUZtZ!lq8CJXmlrb@i)Nwm8Hnxn1&}pt6^(VF3!e`JH7$ zO|>NC_Dip88;@uOfV!Kc)|(&#ZSb9$s)?D;e|Qf?DqFPPKnIgpe}WP=I>Nk_Z{}!m zO9X(;HX~D%cWc_OBbj`+6f#MwOY!yI=tT&-RuHSZjILRL(Q8Wz^~FmihK1MXihV~S#V(lX!#?cDNbq4(mooXiHrK5cId zAE#Dnf7dHy2O7bcitiB#D+dRe(2qk3x}s6e^@y2SJOce=AYKsf`#XIpWZ$sGm2LC1 zhVSs3S=NIrV6~7RrHdJ6%)|z5F)1Il{kmUuch>NPcz+?u#ul8_I!q zu^VUZ@7!g((D*1JRJG6W<|z62>_@Z)Acc@Lp4_#EP-+g+^i2e5+$cveFQY(9j(Keu-W8l7W_hQcurhq)_7wzHo27)Kl z?V_`@M*qD`Ex6WgYNOQ8s0iiH2&gA}r?Isr#;%G)3ry!U>Sx|Q(RyzcX=O0-bA;bp z)@=e4mmcZ?bOQZFQFkqALa;hLACN*2OX<5?GTZDD&XV&G08OBy^MR`V5hi#`_K~vw z+K|+yg@hlZIgb zuTQFAc&(>bzej^J_@@IQ$YLHqjYl3XmQ{VGbSGfRKgbm6xIVsqNj{K+q9 zrt9Uqxo~5=JoZeTPf!+soYU-$Q`f`vBauwIhqn-o*+e-!M>94<*pcee^9k`pq!~Sz z!(@>SE>Jo?1CnyhyOnJj&Cu`Ce^p|fAcX{RYVyDvaDB>C)1)VnnwTgBqP7Pxe(PDW zvR763kOWyzd@jI@aor!+e`u(}%&8QG_b4D3mz2uoK&PFhUkU1QxGL4WS3i&bs54DQ z(QAi)&HHR6*hEH91ldh{L+5JKe!A$j$}PvtcqpNXKZ7<72Pa1?5Z~&_emn9;>{vs3 zh2dMNL7Sk@@wm+DDWQ0;7Y^xDLa@=IXR>aNi+Y(LlDmZK;nK`RL`TDlK}H^{!te&l+e1?B83cJy04OUXF2J%>f z5#}q+QuGPWA(aM7ebpn{B2^F?Y_3Iz_^j9M`UZ`xp_Cd2`hobr2-c$cKJAWa?${( zVC=9Deo{Q1hu#Sa*jci!izQBvIj17GrV2;WR8eX)-*|K-C@6E##h~ALB(>M!hi>hc z`pu5(VeU)!a(WzZwSfiE&co^sdVEeZtqqwY75rW%1A>vBQ^K|$tLvYS*6!9pnK^xM zMPt-o;0~0SS!iQ|a|TW?=3+OcnvW#WyF^t8Z3#enG;3HK2_m;&wcl$E9;ow8z3o~= zA7}0A^Gs{B?2m+pu`Ni;P6 z`9*>R#$(B0s^;~$kGpeq3aw@zREIWm;pTzr%IQFU=+r*4=F#1zCx?_X1XS;iW8{WS zOY`RIFtsekUVu`#TJXUo@E{|gl;4Jh zhK9ByA}XDja{>o>3mO}sRRkd%U;`x)2Ogb`SNMmdNU_HDG0{L@JM2;FcSI>8FNJ7e zdW4)6GdT1KA=q*(>Oan9q!vo7Y72P@;(b=}sL(MC?OZS+3=j^=m zUHVpqvMxV9%0cfGUML}H+s_L6wp48#Gy0SEMpcsQWPin77j}rFCPK6|DOkpvbzL6P zXA8`003?}ie1=^=H}|to@9FV|8D=rgrvZj-LYKk7IHkMsR?M8wbwgFRu}zW8zyXR) zPbZS^ev$B_rv|eHkc)%>9thvs)vx?5Gm)ggA*B_Y_*s6h1as^sl74^Bn2!cYrGkzZ z)Nd7#UIXEwyot~B6wZv^M165O?;%mqh9o?nu>%*aAB)q~l*%FfV9V1DAnk=-lJJ(` zT!4`Rl!oFqO*RAyht+=C!7k^c!0nJB<*Y$$KtA+=x60aDSy!G={`$CKJWYO0IwDy0 
[binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/images/Geo-Rep-WAN.png b/images/Geo-Rep-WAN.png
new file mode 100644
index 0000000000000000000000000000000000000000..d72d72768bcdf859883f5056de4e7c8e1ba40721
GIT binary patch
literal 96291

[binary patch data for images/Geo-Rep-WAN.png omitted]
zo@h&UxUG7E z_?xNCr4tF@8kd^?q`j%5ZBO|@-7I0OJ(tvAc81#oJ#egfCl(t;Y+VbCrN=WmBIoxTX|E(Z50} zaP$yx6}Pat{nl(@kJ;+=40|hImF22ev(ELGeDqA}bc-k`0$eF6lbptiw#bD1sC1k& zP+RneS_eY~T$SajE=pIDsZ4iqtw_18VnEzwxk~5-ax03^)l$B9aK&sD`xUcQS2yTd zXaCDT1NV0Z4Ck=b$**77B!twt>AqCa z&Np~*@u=~DOREpN&_RH#IL34{5(8HGY#^%$2fHvYGQcFjRlcDbC!^K?*Qax8YUh_d zma78S!x^+3uOe2egq~Db=z(dWW4b=P0-9LgLnNnTa|XC#gEDlsa9En_xFx6f#H?y(pw=CJ9 zRBA~3mW(REH9oE28AnCU^aezA+HzI&hn_muH?8G*FW{=uhFRNNWSP!G9il8(z?I&9 zgyNN3xI>lV2oVHnKDs1E?(wbBR-fGQTkKaaxOREKwRwnaVenOhD{%+|T+s?OxEh&I zx75a{OG&9Do8;GN)|K8eGZU*WlW^PMvETC>~*|mm*iyW=BJ; zC%;J*a$#GGb4k%e80ot=sk%MjDoUd2L;>uzYH&SUCtLgG(9MU~J*SDcE5$P_CLL?f)#0wjvMPdE0^!_##`cX*|e zP31fa0eB`U!QiVHyNcRg>K7yx$kG~RYo`HseYc<_b$+?HKst-Av zDdm(DT;Wsn4Dv1oTouwNB2~{2N*SJ6tYL=-hlJR{)*kKbq{UX0shTG0l;S)N2+b`? zJ*eBArQE_tOw!8|J3(W=n60YpUP}JNEiBrAA%#3{VMXrmA!PORFHxO036oi&M3$h~ zzb#ZtF*p+bAqd;JG{GF7|Zq5f~%tA zQe^!aTzA*r;JMxHAC`(^D>M`>V!8T&>kF=01+L;2hT2Jk>rMJY{J^^KRDrr;k=Dha zo;EU?UKnsyr8t8IxY8aj25wzG@GQs0&uwv(u!ujx=-)u2kbmOXsqth-*aEKYGp-aV>qfjmz2bF~JiXCKTc zJZLI~M*>8rG*;EuN((;EmRLX8r-dXvUn_kyd+d)BG%a@p|pYj9;249m4+ zVf|TGX^bf!o+Ba1PHPZ`CR2})NMu5MCCnXCVys;jH>r+^+FXlAR&{&GKEj8Gql)CB|zBGgqHHaf=au_ea@DBPIC7}E#(A^K90h6rkulu^BK>QXvY*7&*Yz| z004LgNklsE(N0as^kV8Mq7>{%?_S_fyXL zkL8~MBl3FNPCNpo-qL%c)K#+IrSY7QjsdQ)`}eK6&lOdLT{Fd4#G|I;LAnx8>^#sY zQ|i_zVvz?NQbTfW@R3qb0QDe!UZue}!9_781y;%bu)~?!9%E~Xv(&^S<&*r;wk#xT zfNKJ!>Z$pMGAj<*>oIK)q}OV21z8VRT1f!bwDpp8Xl;*n_2@o4QH~C3=SaTA5Y#{` z8nbkdk*r$j25l;h%_x2_g2W?zdWlUPm6PEny@p+gdQToBx-i__# z`u6Z7OGv6KFrzTQR7yiu+O0D`I$;4QTWxd;S-3zV~-+&AS zqN*(lDBa!5TW9iwwIs$-BO0QqIXtr?!b}@i5_diHhn`4)(Pqxm!&P^3RouVSK9lth zOrl^~+aqn=vG%@*jF#wRNF;^`=HnKoaODM8T|`nL;2Q3r6sHYvMOh>J{2-abl7ojC z2OEeqzQDh#DnUEJJX)OM!3fBrfsC|G)#RVFoP(lFMN?YHp{GvwLU}#N3cj{TDNZx# z^4PDySXD<@tS+X;P z2!@D`mnl@1a#Rjn7Oe*|TMwo;-~gyVSHI9Et@`DhYHvc5rz?&xxFU)6fUD#Z`@z+Z zaIfzS?D7fwqO6#+N+d}gjKZWEOBGKGm|g zL&&gWW#?)|2exc%dMV&~G`Bm-rMO>cZc3M%%tq|d114#BC&rw;Gs@K&o!!MdxGn)& zWh(=&5)Y>on^qB)Q4YAqnWVQ})%+52se4lrns7%SwUoudS!ioOa0zRyQWpSD zQZ7UmskWY)aF{{pO0pwK^E8V}!O*!Q2DrwU>tiyCqp#Z@%g8Ptni|^_hNelXadzD4 z4V|&6c1-^eQ*lUUd6d0A-qjj~jw1co8_@|4WY*#wmWE@FCYtm~KR&zAumi5j$tT@y zh0`XpBhuUvZElaVv;eL#Y2}C0il4St)J&}Pt-6O+wkFo8!q>@_kXyca>s;>yu9&Tc z<;oO@zT;@pL;F9)gr!c4Y~C!z;%n{3S6(!CmbH^g!UT%~cWs-Xttq&`eK zwW%64#s8<+6QTQ(90bwhbTtle4!gbb^Bl+o1u+eKq#2WivKFeZDoI99sA(nUs@%fc z=PR4w=pq$D`ut>8GXgVfC2C9Zx+L03vI96R4bss`agR9LIe+O?WtXQ2-$k|MPG|c#Wli9!WFIVTrFwj$wU>6a)_=Vh1jl6rU=^PgIQGvZPoi7 zjRzcxc!jlVZopF=X>9=SRqt3&$74;@0;o%b1DADXb9821^!2YgietesFz#LBxOb+t0vVz}2X86>>2sQD~C0K_iM)hQwUAx6t@!wjmgDCssDa zCfg#@a+F(GkW#rrN&atB@4_@riVZBjgqg%g1)zemnN>pIU>3Kq;IUa1Zb~~4aSKCV z8ktfWmEd~LS=lhVf#r&~HnVm3P-bnjnIU|#DA~MP)>q-SU z<*YJWq>w>sOojJftZgWcO)@;1gkWmBy6`q)Z4A0nGT^SkXNG-zIK}mxwWx7oX?jOy z4}3*HSz3CJnsVaI#Uw}vrv>G>uwmU1w{;a0hxrOm(V! zQ6k(?G{!M^0ODf+!oLT;u{s`3m<^DO25~!0D7cU0kI+t3nb+ zofb?eVa}#-7eGQuvPw#F3i@K7DaDFby-i0lDoiaS#5QR9-M-}JmJo+PwWI|S@FOWd zwiX>%SAkV>&4o5vM;BDdtaM7(E{gyO#nB9rrxR62?7NL`(3A6qAt z03bBIYqA>9i(^xAF637Yu5M8}J-Fshs%(o)bkO5a9%ky1$Sur<>xAp733b+nIHlq! 
z#3kNx{8!Og4Rp3b1gHm6Le31yZ&dV$s)CJ669NjrH7YUpB}iAc-Cl52zF%3u9&jbo z5*Gh;#pRDw!YwQXI}C})`ZI?8$oIOXo-=Td(>XWQw+Fjx3Ki>5SVG9%r7*fEc>)Dk5ldRw`vz9$q>(BcLpG2*z;AV=~La(hI_pokz0r3MZzd zD-J#2>N}66<`CcIuSgZCsm&A{Q3?f3O>igiUs8>fP-@V&BM28{YJxBThoZXb=1?vq zy#ZJaO9v_ILQ^UvV4Q4w&XQL*I&))vdvtkiWNE#3dDB$aaV#VIP*y3Gis1-A5`9yv zLsDz_ITlnvdL81_d&FAiN(h)B&NI~qr`83hvP~lpa42^u#?~8dYTJLca^IyQz!kPv z!1W1pX;I&F*W&h#HTR6d5xRr>x`nkVt9!C}>sEBz<-6sCJQxg3w~r2 zNQfYD3uC!H>ntK99-3JcXsVz{i?Ay*%T2{6z|zzn7~Qx{@^SFmTYv}5!Aha5%&HBr z)Uc`!X4M_E)Z=Q94N_}DQW`=NDxwqe)9QKwJgP#wiMhI}125SQUN1P9QF6dsaWJEL zUq-dSr`Dl>8MUN13JxE6HpIrQ_T;${Brw$-Osn6QQh&fioup`;n!>z^D1$M;JoTvb|;oJg&nI^;EG=8X-u}gxwyPEv$Cea zHLbqmx%}#vYr4<2Ouf{$c&=^vTh*Uxk;JyAdTMD2~|n#Qj6EhP2Lp0DpcTitB!?JvGD zPAadtrSEEG=heoMb1f4uG*7+IGXG-R()sq4^R3J0nikJCFL0SJcP{FEo@-mW*tvSC zYn=_wwJb5XWZusjk!&C&(g-l(z+)L>K-reI9=KISlRIDlF?Je zH%^w0olxU=!Qjb)p;JYJkChBR!6+SkysV$`MESsD<^3ni22NHCK2bCNc-_Q_%F)Ax zy{Ak1+2*cPjV`cGH(@cvlk;A3TjCyEA+=Jg*b7&=xw zcC2*rSlI-p_gKl$(?uQ6=G9nQx?2~PJ6Bd)7FMoSHa(eF`*=~usp1|v&(gkAh21BM zx*snYc(OvS;$&ezLylE8!Z(kX4RYG2iu+C$^&T(mJ5@4xs%+#``S7XI-jgL=CwQ+I zJXSu$t{!(*C6+d{jLq~+Ew+p=rIfWlVJm+kzxm0s0k%IYr`y3fG9fUESoT!$lD zl9;6BCw9-q+C2T9R@Q(AT6Lw=w!KhTd$zXkeB0bhtxGR8Exk~`_*|`wGj)qEH!fXh zTY0&8_DtRQ3w6VcGxZ}c*N>iUxbbq+*vrk6FE&p-+c5of{q!?U3(vQ%zRyGd#<8cW zho7k(eSt5VX3!zdw=KNfH2-45EE}`sOO10}``M=1vkl|tYkSX@)FqVEwa>3;a2?-t z!%3doH}-OA^UF28%nW;Yp>F!Qs>zq?7hbAcVgg@mL}OSz+rIK*%i@`q`IlQ3K=<>_ zOXr$E`sH(ttIyZWKV3EVeBIL7#*K50>*wlL&(|$pZk)SNedA(T@740I(t(BQ;hEaL z8w0Zol;S{bqske;R9(UF5P+*f)?2_8!$-LL*XgjmfjLk1wyCxEYRka2zPYP|s|o$v zS9`avc5Pqj-oDtoeZG71a{I=$_RYl3&7|(_yZn9aY0)bvMTAr)HCDS}#<#pKBOB*F2Rxu%6hvlF+qy zxnn+|b2*`B^(sne)7R#YH zImyH^ckbB~>k@~*ThT#QU;}Gf4soiraeH)1bJD2;nuMBJ_4sKi@Tshx9 zb+L2iQuos3?$v8u8_At(8Es4HZS$u7jg-Ny#9{Z9ft~Zc+b?&oo$p)|UASwF**@F7 zeWrQgTKjxb(}=5W2-g0ojqQca4SZ+@xN`R-wNimA?zNlXy1n6M_}#* zQI_^tTgPE1i8eK%sZb~sN1Do_GRwl#i$hWh2r-i3Nd68EHi`I1s*#ZQ9!#qZO|ObI zmB%D!KkF!~nOyGQl29l)i7^S*=p+~Q1;YAE`*CM$OhHe4aeriPC-Ec6^ig@pIG>3A zs5qVSn&PtSDclG#l>}!L2c?w+!%>`78}Dk4 z!R3~va-q%De2B8vF`~^CahdsXsn(}0IrZZ+hzlHDKw#HF|H0up-GC9z&;6bEM-rsSW}#-CMvaz60YFu`G;Kf zNAp^bxtZ#3eob6xRz)-$YT6ZKfFFLa<#$Fd=t<@ve)^H@d^=M89 zKarkpmWFssV?2&x+{I}X5ou*{mbxR3W;Td|m=yoEwJ|QY^-z90=NXVGjVxkp)kiZ6 zNlX}8-67b}KDC-r*>NJh0A`7>v{G?9+nb|vT6yZiEalOTniywoEKiHGmq zdNqH0G#NRE zE!kzW^E4Fc+Ht2fbstU3iAu>Pj71>>*`o*<4Q$btCc#cCa)X^0lUsT?x8X=0LD0&8 z)B+%udZbuKW2B8#qZ;IlE;o_{E>r1|tdjUt#}k>ZsvDDZa-G=P8r|6JS=q#|e#}z9 zoP=bQkxEItFCe)R#=66`W=ZqBB%KT{Zj7z0lhA~7Unt_Fafmiv^#@g*ubQ9g z_q#97H(<2?X?GWDOoT0u9X-xh*0cH`)+wO&pZQ9niEv`Oo&yP;C!!#CZky>NL zc`bD*)QS+=3`mFgsw*_DJ}SM2YB1PLB9e0=((?mS^Xac1Mv`z=!=q%-+1g_B`;M0m zMLB71S`m^`7<|pX@5O7^YlYi^I4DWHmns{N*^6VcilUv>L0M%%);dW6X9WXzd33!e zW2iisJ*T*6=5WZtQZ5Kg&k4>fj?QjCrvrl})K5OGYKJEY>tykzR?ylh?z;U_Q6-I* z21%aUann+RP7%?z@<{0Q#TC0B-JP>WU_n^nUQQnXY7NIvic zvKf!!zSPQyoQ|UvLlL=TH&pFQ$_=>g2)&&CyrYm*2w`ZNTg|HPI+rj_T0&N$z((h{hS_U)4uevP_nmhfN-KNZTyre7j9z#F5M0>nW$K7hlU9&HQbwz= zrrDdBy@0gBu(a&R1oKPTCAfD`fZ*$7ovP(2#;VF`lByfw@z;bB5@Yuce=^hS4ln)_ zP&1%K(B7OT;p^Yn+Q5Ofwk@hYLdi`g$nwJKJYpKdi~7JTrHu&jxQy}`Gi9T#fI+mQ zk9HtImL4d<1Jj%LC03B!8*8h(&~n2*xn&z$P3jsyo?R2?L^G}?r-57~$-K37(v*}! 
z7cx0Q(#qq_74fM#Puq%`=GOt&uGP(y>bB#lIj79UhcnA!Ni#N8?@y^DwUF$6DfE=m zRq}~Qm8yEaYREq2w#~Ye2IpfOBqWzgmpUw6mDYrc%scoOnl5Ja5o={^M*b;lVeQ-k zB@EIPr==I##n1#BnNvq{+YaS)>O3hjq~K*doZGQKsU$GHEYe>2bZxJ7-ff<7UmsdM zTF?^XtVg0E!K8qP#7T1s3>s z#mg7VYdU#a-0s@tjTee47ty^YoRBAzNqAj7kp?}*=-^AAS&GVHF z5owNaST}8zae0kVE|T)AB*#w(Txxkgm{g7q7@A)3SaJ7LrQJ^ycb>{`4Nfiy0bk8< z8PWi*h0Hxtf1^wKLbIuet)bs7MPtF2)1Gk@bgphwU(~m-W~y#Eo>?4kt%}X6qL_@- zr(j3jqvpy0d)?u(o`@W3#VUgFTqWi|o7Z-weDI~B?x#v98>i<~m9%swg^Kz)TU#{z zYUwqUmPa^hG3&$991)487o0`?D&|U4IqsWcijah1BCx7A&e}Fi*~%N?1y?_BzaQ4# zKr?V#a1{j+H_Qf|Tz9s}FOQFYYO8E5at zT8*^#gRF?7gPC3X0oP%TjKU)srBuPeNpz>{i!7f z(kS4OdS5oi=oAv;%x3Ha^pCid?Bl79x|v1j1VC2YgdJ;}*Q#4W6ReSR0Ww$XQeaGE zl7xSn+!zAGT~_D83_!Z}ctJbqS5T=QJL-mq%R7q^%&9Yx|D7 zW@+=u?2?c~OHi^45Dl@`fR~`lPg1K6n5CpzXq(LC;O?2mp{&{M!Yy~vmOFKH{YX*k zzO=HRq?A3HUd15{aD_08MTVhy*j#xeHTPP1BQ|UI=1$$h%FAW-VaX0$$^oY0a7Qf` zWT3fne_G{%)GEnA$3v1@$-;`UH)Kt_OE%r5+iu&88!Z?*h@i9z2+Z)%V!l6Wkusru zX1Y999xZG>>@0`$$;Eab-4=`u74Dt&86v8Kv4!U5MzGpWnMqLPDNSr%(6h%?z< z$}fe9n-CQ3(ARb(aY=RWqKwLesh&JF$yU?E{r;DK2Hrq3a3^q8UNotTTwmQgKIoVfH@7iaG#+3=H0sfnxcA9OoLxTr+M) zO8?B0#T2~NDBmecDi8~Ue@nEp8|4+&Gon9mv&1CWpS2Y=&##kGOImV9b^EE*++)dk zhf<4Wv07~ z+PLh-XDj-0Cf&7b?($_fo~6etyTe>Hf%Yn~ajim9!9%q#r4-#F!dXEhi)bs=-?oWrSWTUmD)h} z_S*W=;@ZOe*4oU@&fvN`xxDpoVs?CLX+)|N2STR+sW>((jN*~mBe4Y|QO<76xP6HQ zB=wxiYd)FVaLiRrt_B@jNoXR61Do<-8tgH3QesXuJXLlXfNNoFvg>(QS>xOqb{;X; z%=)g!GYaApT~wAs-%qG01Rs(JiVi80&A{tR8c{XcbG)tksj?1yX~#;*a)O|yl29vE zh`Q~$&@Iszz(&vM8sTk7&WA+eaaVcO)NVWXGmtoif>)=9ci@5#pWQ7I2k1 zSCK+STTBC4CB^aq2Uj0k+he)1db_RreaKRHAW3*N3GQOK%5s4V!Ymz^aNrQ#ln|`7_Su?Y(t2NR8Go#-D=4*)XecKy zkCVN5e`YP@P~in#s>eVTK64_?k=L!~itF0fgqW0MIiAeZb`(`o2FfwVDQ`V!t#~A} zY=2f|R3SP_HfGPbti##AxgreDfxRT$ zmKT~~j?c25Eh_F=-|xUjl7yRibelI-ee=|5wygs&i$LUxQ!8a|ra9!+h0 zB&`vbW@L6B$*#Cz1CvY0mcTv>PRa?op1uE?J;YoR<*JFwrsy1vypjra>83@z##9!L zY{@J-npJqNw6m=hEWIcwIp?so?8VZi>Y3G^ zO?T&p+uSzt7>QSzWfZ#yCYA)HRzpc4oki>@`Yoht5*Ee8BsZ8u4U&WprWOUIl~JRQ zAs*{&iO+5WUt=6Xy9h9gOF6YR+*Cu%^|Y|{xz!7C|F%1`ec&-?832!+ zL)r%!AtYDhwHD$-Xc{f`X=(ODMj6zb1uqk1~f)YUpsbS~JYKyUV zNZwFd8B|+9W^ke{D9H}#1oIx5E2PhlrdP7aVsbhtdxbg}ey>Bi}^M7o>3NY z=a##5&7IQN_k^n|G9@=WjqIUf7&uX>2#}ypNtQCl#c)h7I&eKF)KnOhS`d(&w?8SD zb%q`avl6~j;tgndhq8N)WcS9~+hbJ7;e@&DXZ2yA-0NOQwjNjNAsHw=hZ~m3qvi= z;MDZfIXTyAs`2n3YnBLUc+&60Ijo5!XrB(auHsLV4}NfcNC4|Wy3D(SEBK1iPeK#m zN{Z9U+VbMs(!%E2?Do!$EqBGhWNKymDZ)C}3xtQr(i98(8(nVF>JOw6)~O=g2-iwj zN>NNk>EWyj5{M34N-?dX9c7WO^5CrE{TT&3jNwi^UKK&+(tX#nBTNNH?WJeR8`InR z>X$c31Zm%N7mUs%)ODP+6^13*V=W~SS><82YIJcas&O^&An(sC4$3NKpHX%)gkdZv z)LMdm9BwT~s*nyMc}7^AWJfw0aY|!Xz`aTn&ogDM=^Z1aC1bfFF$#z05~_Qja@EDA zl>}bN#YYVh8#%fZF9ar+J$khu=z1wv6mP4G$}Eq_C>3rTQw0NdRl3wc>qtUY6d_(y z6_`qF_>n95p~=OF*_R9JG8((;XO{@>4sLDtZfy2$y7LAmAgVi*SrB6?jG|fzgFdZ< zJrgI!8?1u6b%%2sk$8f>$vFuBXq!m67)NDnc6EfU3@=)Yt18Y}5pOGjyq$`YBbly~ z7U!jsip++l&ZWh%ovoRj&H3&174p&WJ&*|EVc*&ft_I59J$m(q1lL=iI1R3nK#G-4 zxrStz${^rMD#0ejI9r>`o14%x!9q#OjH9jZ#r&%H^z4XaS7=HeJ`J3lct;tOcENUi z)Y%rET8j5IHmx8!H5*rcWLox7SH+3KrelR+^QqFEhqBfcdiOe%nrO@qI!&! 
zzr6l&?lhQ38wzXt@C&qbO|U`Pb;w+D!dd%RL938lTWSxxnvOW@k2ve%t%NHIS>+hG zd?3%BMMjD|o|%1#MgMk*MCED*&5xo|E25H$h?G3zsDk6Suy-858&o+>w(gM^3v1&| z`9uif?d7Zwx*m#QnpCI`R}Et#mq|J?N&IXOn^i>r4T|cHI;&3QsRd^#56`N=BaL$g z2RI#xe|j-J{JQmgNxikLAHHwGLR||R`CX%z$~q2bmPDoI@nFX~Dsio#$XU9UR-WPbWSB}7hRZ@9hp&ZBB%C{vot8(8k}Z| zNK1dYyv)?pIIzA>2#RuKRi^DJX%)(T54Z}fYGh^To9=)4XW$Jo19t*fMK2~kM27TA z*RRdBmCe;v_xAku_UPJH%hXay-_#jLdH5B3$kptaw9=!F#v^uVl^UE>a`0*{1%WiA zicQI*ZuL-l{&7q3sjRY-j`BEbL8LV=Ak}&x#ReS4y2!Jon#J|Vr3^@3pD(JlbPtzI z%+V&jcUw|48W%TmhbAwU)gLxxAI_p59Q>@M0B1~2U35-$s0}4M`#_o_B$LpOD=5Vp zmEl7Ak=q!Wl7qC2wNxCkRZ}|$fC2C%G4Ys_zWa1eg{AXG< zJ^?39#j#1b@u@{|>7|FwRgBPtynss%s9`C5IAN=19Jf|6j@hb?+pCXcRZu1rnvffK z*-1i{M!n6&$?h3wU#cCqGIU>bLci3oiK6OMCQfLz&KbWKIR(#M@morF3gMQs* zaXuj6stY$H|AhkWO>9+$Bo1s!cPIRcly5KXY)@@%4X_-tsWkE>cftr`k8VLr@P|P9I@%y#5Gxqp=r4QA)v^u3cH>Ye$5e` z=sc00cfO#eWMHakW|cclmxjI#cj@2^C>WoRb?Ca|5RXK1Hn*H82wjEgAVIY#_a0{a z2+Gb(1#ublfhwebsxVV$RWUdp>R67Nt5N-;67!=Ha^kLKA5Y1)B1${SY40k-;8Ixqi<4r@lsPx>>Bv*JcjT_63+NyXAB2sgsGx%>obUK)4rx+$mEvNBp?l&4jxZI$5sC%O9*$Y^p2;qCc8^qyPVuxr+&{YRZWv$8Y#KamDT+$8 zqI1wz_OP`WoDNIR2}*YG1j0PdA)r2vNpVD6v&5&mSg9wRrH5T5;g-B;OJR6=PQYa| z(|*cX1`}ymswpPZe9~bvwYL@Y_1xIrT-ezmTUf=)RWA{t5taz6YKgO{VO5Y-?zkUZ z-ypE!KMk3IJAtcei>?+iIq>8Ttgo-Ft!-?r3rjXc@LStAR@R4CcluY{=~Zn{nDP-9 zBvlZwj87|wfe=5@1?T9|v^<2&X;a?eYgthjQlc-X$6hrbO3I2&F~_EvBd(`MB%6=f z3XWRyV?f!Ytl;y>k6H2{Nu}>8rF2Ln3Rec!H)#^kGPi1N>Ut(C_cToRDfUAdIW#hf zw-jQoQFIWJlog(w6`f{{O@(X16ql44pKLy2a!Ox;ge;PYsO=$+%NZU^&yBj25pf~q zL{irI{ECvH$&RH>8o-TjxnU!xtgZ(Gqp|0OtRgPzNRl(=sx2rF70SaE=M_*o=@>%(Akuu$&Tc*#;W1Tf%R>U zBAhc@TV!cSIn(?K-(Sorf8JL7Ojhx!)aI}9*68bw!|8c2{B&0D1 zbsnL7F~J;@Y>7{^ai!5InK8+x_$1Teq|C#KX{Xc7kEfYlw!2Dud;69bN7q+z2rs!e z7rAjFh-h$x32;kM=&ca@h6mSO;P0ylXzf53u?ks+(iem~LKR0!{l>=X&gRP2<_aYo zjW^%8R{f3cr{Zb}=LTk~!j1R`gYS{PpZZ*Ru~_cN|Hw zA4{?xy>4b4x}F(*H7z>96n#A_I@ubNWT)*BOD*cECHk`Yv9z4a1=Wt`o|@sQzJ>J( z)kbP;%iTV+Uf4OBRNDB0wcvD;{qUs>?swcZYwR^UBj#$BjBDoTtL!{8Ho+2m&5HR; zb{``u!9hC<-jAl_9l4%;G|_eVn(btw{fs%^T-8$1Gul17d}BkpG!RH`n_JH79N>zc zG-n@5GRIywMI>ZUIZ75jPXdD##+kE>~!8@kap%fcekAl2^uiyIXqbLn-R&pV3HP7bHqA`?yF37I_1bfXOCpoylK zM6M+>_F8%jd%l_$f1QI`qLR!Z3F#E}1Yb;zxRf4qCF59%{aCv7P=@)L{Jd+mHRWR? zU5ks{9f`J5KShbM#9D>7QrfnnHmR0#nnVK7)dg1)%AyOk-+=X_+kKw_!!)@qrW)0* zD&WczRmiGn-9e953s?QQA9J)O(|-cbxaqfw9uTQQpgjCn`o~80AAV<^2=oz2gldGrhB`6RU2z zfh}#j35`+fzqsX|T5%7|thS8K)eKCO^^KSGPZkeO7xv5H3I-=RR`K9O$>3P&&^RYk z!p?_o$miiHI20Ldz$hDDCi@aiT+0_6LkA9F=Wj0uq#>#Y;>2u8Ln z?wer9uZCt3BaHIl+2a1mqP_{PrJ!ewfkTe}>Mx52#!CmMIT?6^3;V~4`0v0N1%VUm zJG7?3zr`Sg#2`xp@|oeqwT`L9=CS$u(Rru}D+Z=HTtVM>{=fv+RX8-oWipD?eCj>3 z&!WK_jDn#NM)A;S@xTc4SuuE{VsNy2c(iSDYH(?3dV34(AhlaUt+avBaN`qay%l9| zYFc++m9MkqdS`I;SmIvd9^BvJ_*Qk0YtkoiOOSPEb7>P*V{2)1Yh`t7Wkpnvow2pG z_Q~1Oo*`#bXIe!gst~NUiRDe#OPiC*S{TVPno`P}Qp+1sD(X@z>r<*^Bv&^vl4^Kw z(Icg@na!Y9DC!(+9GU8!UKm|mhaYQpOV-xN%1+z(B0!ka+-JSBjfVwcVzg4pViTsiMVH*66BftLh%=nwY(@ zyf&=@$za~d+E(}cQtjwOQTIS{Wlds5Z9;h+9U>DdS`sT;dB0lLfDW5j)sR$GpHy9& zQl&;kO>%j4T2)+?CUe$c1q5)QC8rUV4 z)~8f7WYn~9-0Rh~miG4i{+`yw*&FNY9D)LEJPwqSQ5Q(OTxABx(p8yxf~@K#uMuBKI?-Tr$;YU`4t^iH{2qKwsxdfjA|CCQx5T+ z@?V~4g34M_u^;x#QS_PX%k*6AjvJeVL0XLdQh(5c8DKCWVydpLGh^6{IJZRBt2v}8 z0#PJUQ#RZX$jjw#i(upou|4n}^IP3nU4NL^@qEz#)l;pHrH`vGR!=D%FeN=$1*D`% zSs|-Z?IlCl$Xzn<)mQ6!;K0?#&8^T>*J&tl<(&y%5yAn{Gi2;g*|D_%wI>ba2|@G! 
zIXnsL8{ovy(k6=4AU>0IH`(w*t8Tio46exFGn{|?cVxpo3NiH7&d}!e5HZe;oq>%V z-Wj-L8EnHk<6eS5JS5y;dM2={7P-yq?&($c_{z@c@)muOMi$nF7FOB#h8m;nSq=Or z41$^roHiuo_OE#c?Rf{t)Lq@7FU`QhdOtDz>G`pRrJ1#Lia~KG&8}}ut*yd@LI&Y9j-xW_iSRyX?Bx0xB4oGqCsrVAGS zp|x$MmrLN%2*Y%*N&^sP88PCI!;=I(H8jKuFl=p!&`>xuht0Pw zDKos!?rdTO$~OoT_x9q>2C@tD0NJ7~HtbQ#3za=8jG^B=Jwo% zob#?Bhu+yiN)wGqA^evb7?1l(MF1g^-0;4HW0JmpTr~|wmB?bEW>saX zx*~mba+$P^;~n9K%wh0XK66mUBA2Nw>H7+2NpU754dXW>b9>38C*rO z-VR)MRt#`;Z!b~YK~oBkb9iHYeq()ZV{>wObz*saYLykNbUmf+F{VJo4fl+~O9tO- zWlbwGTB9-a8qp%r8Ci2%J9BFrtlRm8B{fzSmN%Bx-OF3r6kHNUz#v$`_7whGz(?E2co%F@Kz zngk)Z5VapeA7rd6~7X(=q zxB{yJu6XoU1z&Y?upT$fRv1G3;CjDd#eclphga_?UcKO|Sz2|*fG*TlSgXvh9jQHA zcJC08LwZL=2S|HHsN4L^rtkVQulA%{N zH~)E6gSLEB7tIgFnAV+Z^zfjj`XGPRHW@?xRe#pZenwS_&!Si0>gk3qWWsm4 z@2{B6#6I7!ZeirCC&;SX;24&xOb*8D4*AmPdE%J|T(MlmUCez_uq3M1<_@NqHZ&RN zXj%c}d-gQ>gLAM z#v1Mm4a1xmT9`PfRZs`~!zC`KsysGpHwi`bGb~HZb1mxUAot?x%JM2qnk(|;uwvnH zQ|PYGpHl+YXVzBdH#g8p^_=)li~h`{uc%JC0<;@j*uHaXn`qYa>s$O+5PfHhkjCsL zPa#^aYJz@Sa8*lMGK776!hUe|W8LdA1BOVwi`C*0uU3qfx(msBaYxs7z1ET~O%@ z((z8ZH!3zpF@^cLIQo`kC-PYjRcpROwhEL)GNkPzGC+5J^t1$5G%qUU&||&PDiU#` zcd3RKwGqhbLWIOrmxSuR;{4oWiSkiRk;Za`!6HH7-cXQ?ULe{u;&V$xoq|=MR1L5c z*`X*7WP;_S&|U-`z0pGz@7AijcNmI}^3_B2p(;&uMz<&>D&a$=d5e3@bGHnFh?{xB z9;Ef{k#2n6MSZv{b@+`8t~x{5qgdUHFeCYee%83V%EnV+SE2h+`VkA78>QR5DlHG6 z2Kp6VlT{rH5gp2FENb*+QL`3-H)&v})}+AdgRRc|0Jlo2WmRQl2%o*USBST!M zT&pr$^-OAHHOj91G1u1xRQw0kGjMl%<(8=1Etcy|aMh2i!Hy@{r|xxtMA@+(5K>Si zPB>syo)X438Zv~$JZCrUP84N^4hr5h`@9K&0*9WLK!vz})Nh60SPlJ?Wj7r;H*_7j zp?q^zA_(On^pGTIMD&{7o&-bvR4VD?0aqN@+o;?di)6hhh}C+#l0#~5vO`bFiXynb zCGA$uOIx~1gxnn3ycG39S6d2cvp9ws;(lIR;|j=QRk>oaf3;hEm7am@_Zf2Hy6!-t zPC`-1Kbm#ldq#S)1fx|1O4JoE0h4z3>%V*FWY>oW6I^dP3H1&3N#D}oiftw#J264= zae|=&AviDqPG$XhTjuyIad{^AvjC|07SP$0T`O*u9j)eQtqMo9Ai4pNGT)-#h{hzK zxgp(nJp9Htmkl>iEmO`)h7iz;f~Gw!UVksg(srS&crmiY8>1L1SrgiCAv@tOtSNOJ zTJMuprfGQTwt*stV+5xVF%=o2?`g-8q`IlVgbbDTrB6mQQ?KSBs;JcE$wMTpSBf-W za&|r9Kxvm=wObQ_8K}3Lq+03co>UIiKtYm{)Z&!$78DoXG7q+db3}jiDNt&^~w_iU$y&>|9XA5vJ)kd5P8bP6lw0UNY`}-Eo1*)58X}guMb*lfLG~eT!0eK zh(=NKrHoTGRTD|B_OFDT@7a2k1FtBQ8>%xdeSr2s2UpJ}-K^x5Nm81Tnqn+cbypM? zDxU^RPlJ-K?7msYAy0sZ8B$mH_%0m<6W&rEUGSrD#XFU1aR|uDUEu(p0EE)9JojFM ztE@1szKJW_;~UY>h7T@Kc_oxf>c*Q-ZuQV&cVr)H63A2HC zT4&Qpq)c1Eb@dF({CLn>Gj6Dcsh*db3NA;7H$1({$<3433eav;PwR)otLgbF_3AtI z?gI~q?zrI5KvbATo*FN>YNOfXUGeCc{)DF2 z3tariy*UGSjJbMOsVXPa-cFBOSXY!OI6_qi)s(+f*#_SDpi-%|&sfV1550=~%&-@Y zCtb@Tj|4D0nL1b52(@}FH4mg2o9DM{8Pd*e<-An@DIF@6L-1yI zRb{+tN_{PFRmQ7#cwF)_J05FQdZ~GwV`_etwR-a#C3g%}z*7sV*ldg&g9~qH zaJ|#xt8!bkTUcId5!_C#Fe+b_7vA>)@WdWe^jlM==%h1Qo1zSfU1-23Q)ZkAMlGLZ z0eK)pJOg5osV$^jTxmMuA(5B3O7y)4qGVMO=D?W587s6F(x6o3@puCOY9U}^u0Zjv z#~J}+kX0T$1AIBOJ^}3(G5jj}0g(8CvMvpcQZgWv16dI;dmJO$@#0ypMgUt_@yti;yZJ1An07!d^7-@G30&tLJ) zkI&@foX2_I??J3($Q#2)meRrD5i`Q+ab zZN033a=fKf_$W>;`$&W6sON2QON-5J>6;iE+w|iKb9!Cv&E^wjlWAS|$zd_oCJB4p z;$D|87JsS)4IokDSNo4kL&4UP{}^-{7$HW_NAy_zB3wf)@qBOW@Z6pE*0AZ7cOebb zd?eVkMwQ7EzB0(S>U#G9^f^+csvYrYGc-e|t4`UQuDZ#tNcfDsKw3NV$;GwPV5h4n zV2x|T3lyzz_npbjj39>Xrwp{R79RpfQJ10W&OSDvLk*FW`+p0~cuS#%f3&}Pf4im~ z1D>h)9>Lx7R4sw5G

60Sg>(3^m9EX(Ol%rM(3tjomzHl5)xZLIv$_m$OHK!?S zQ?^loWziIK{mk5D8Lod+oe#F{c;Pp&J=9@)-$tIYq=n8hUy};HZ@FP(TJ4KrF6`PR zyvof~6x3s8_de+>EDJl9j(qDQe-dT+^*tF*a9)y$j_}fvO!abq;OhE=_A7h6U$W*W0b_?d8rHG3+1)Aq6TeLwBnvh^r?U~Kf9PGV zDKv2KuI)9vTM#oazkbMjJKx-O4c;oCO#*t>Y(G))T;M+HK`6){Bo#Ptb5%o3y?^|g z_PYSH*sC@>8n+L8W4;*4zVP>c1hq|6z6qTYwp`u;hVTe4<7(PGmAfzFG;?Bn{nK)1 z!TIYqo@vCHH~U|2lzZYN?-V+pTKgp?URO@U(O0^C9v64NX%D^unzpJqPzG~la5~Q* zo;`n@(yO9(x3V9F?HC${g_NJhnOCwuVsD>O!@lqZW;-aneg%Dcsv_Bkr@x;KO^1iN6d zZcRhgu5Ap!8&K38mX^!InW8Z)Mzw$Bh+z`=wk$41C;j|z7m)`x{S{~&miM9&MT#ol zEV5CNWpuYfH_)LwI=Zbssyi|rf@R4A#;sg-0nHyvo_@HQq9RnG6N2@v65+Ys@5lhp z@ZCD_kb+1JOOLoRK`iFpMPTGa=>8+H{CQl5x?dcs^f-zoHG@X z5n33(3@oNEfE8Vv?!{UWCTw9_eHCpMwZ&Q!>Xau^av1RawfM(t78YDW@naR8IrWkU z`~&8jn{Q7H?HOZS-W8anGnCVg?pk7?A8#nVd!}a)RU0B~J#WJg=H}r>F##Li{YBAF z6tQVr*W_1g_q_SO8`o*me`5#~ttr54Z(V`?TrB#PS?#5Wcx%hRuf)VeyGn6()Ehep zISzoSf5x18!Lq8t_v<}w5fc`Hn1RRlor@E{DSgRzYcCH6@Ns-bx0~Rjyj<^gpM@NL zzi$I8<@@oRQ&m1ho&3|by)#ZYETeZn>@4ED9YBa`{$ayWjF#9Ri|bb(K8rnJN`lTm zS~tAYvh!o&l^(0q6$AXElhB_BuOID*eS?SM>BaZKTqz3FMxK`JsuB`^*i#w?>dN!F zgIh0NmzEq6@u>$JxG&M{&-SoO3Xe}+VSUb$@?dYnf9mKp+9dMrJL{dgiTBT#4b(rT z|Jb)IR(m#6G{boQQb+<|D$t*k{7$mxqzBdRO6&4;n9cae)B8+n{2em=!@D1f%Fu&a z;F*-X6W!XcPz^1wzYfw#r+Fx%oXMAYMDa#*Gw!ZQ`LADhBnAUD%sKFUN2g644e0{b=$zC?3$)X%)s!i z5!>`K1Wpv)`LZH)p?mlxj_NaD;CYI8myX`$p24ExVJRytNt&b8s9{>l+7os6 z+jG&A{0xis4UBwc(?9hKeIgK01vn_xXz5O#FGmg{Ykx`d3fvum@UYg-N8WP5gg_c2E_vN4m zPo9npN@S}hDJwY^ac)%#n|VWu)6;9>&|li`I&J<6Ae47ZbMe-PLq{b}X=yCcA3`7Y ztDb&AP*&ZwcvyZEJah9|EbIzS`#yH8aNT%w{ucBGKR+oUg@Q%HNN4INTY~CP%42HV z@;ibiOv^mYfdi&3)9Whu{*el*l)uh9<(OAkfK%sJqipaNh4NXmjCpAp% z%FW9BPMh#>2qu$b9(4nd!h)>zJN;}ugrj+vHfeU8$f~NUOj$Za9@&x>-0A`lpV|`n zcUpbWjmZskHXP{P&9(Jool%RRN9$FfmQmwmiVvA*O!h}_vT%l1VlA)64Mx69RP+l{ z`@QIEQKtX?q-_~`wG@WB-dvQSZ+N7nsKdE#V`MY}rr^Mo6+!Y!6P#3a*0yC&A;BpK z$}xzH1mdJAI5-&v!o|ZWtp%Or<~i5MZh)Kp(PzGJO=j$4+8$`<)e&j1^)*NR4Zi6c{rOv%PFvYF>qs9R1yRMFVQU(j5atrO=X%F!Ip7y~-{B_H&i9uq* zBRRRtH*R2SQo@OrkvL1x4uG@;AS{DzC9NxOT6CS96j50>PyC0642DOHN{VhXFbaet z2}{SxLF7&y_0x<|)w9av$ki`ddEji@+~=7sN)H%TyQJ=y1@RinlU?smrHFQvMRdiL zXsKA$C&qzYynXca2GHaK3JGNikK7pePF3ypsbB=6S3M}pi}#&%#@>f5CU;wxONigu zTmfuW0EO8xtBKhKwr??6kK_^_89hhOSPi$P|3cS1#`1;86O&?EUOlx-3Q3A?ii+K$LF1AQuZD5&?ibO2 z{ZPVBv51HuZK+;I{8Nd4qjE$zy7iI}W9l!L{IeqQ+PE-6tjE!9_ii-;5Q#zo6}S?)_C`F1CdI z1q^a>ut9m#8U}sk{k5Im@$C|fjw>q6I;N%@Vv`J4i4$QZaLCsAxD%MX?!0tdeMX+K z1rhk+lz6TY`@P*AhIIe2_W`nS88sEf=HE(w#GaMW@Pjl#N{=yY4a+Qlnq#RgfBn*!F?wL&ni#W&5sApnSQ@8`w2>14aF#kG692k2!1t@* z#~achY*+K~aFXG9P^XT0UvrlAH?3R)J2tek+aIlklHi!pj)=@? z`|~Y{k8JSl=mrP=`Ixe}^Lf^vOGR`4x+@}|u=6h-=5cayft3SLPJ)D41e}bWpP%pS z)QFl)_ncwQ;Zvc)$x%7qjEy-Iz_ ziCcR3;L_CKZ@ceXbN@=8Un=_M3#LgU?@=q;^m0^fmmY$HuDio83LeqXRl6~IhwNyD zk2+|M#xch2)E7=SJTg>B0^gQW!25x5#1b-oQjoF@jMy{-tG(MR5}j0i@WAo*!cUQ} zGyi6mPNXULLL`CI1X{}h5@wpB>h)#ZN93F6zQuLrRevck{BME0sWG zq9yF;yLaSqPJfEv7ThDs|8i*|Z@eD;385;fWgeBe{U?S=AR#g7{=Iu+UXC9)mU(a7 zpoJzIQ+q<5uUvND&*Sec7A@Oxto+j-jsx3}P2;M%U~ovODQNg=GI3 zHc8Vn8r}Y5^7<)vCVpBPv=5Fvfs`1lU%7UJ@A_pc2b)G20ILWfq~U7zzL&nP;dv8? zf+&3LzyQYhy!=i#?RL|R*0twU4+MkTUJO6L$V3bKSr4r-&? 
zPf{I#ARCC?F$Uq6J7X*%4tbf}Ioa7Sn`H$+M7gbFtarGilH7I-=(OasarhBLa1jnE zD-H<_)Yi}oZN35dF0KLwG-1#cQSonQo1?YM^Z*al2u|Vhl!hKyJYfTOE_%wWdF{Ojii`yPXFY;4c7kvf7%{N5MH|cYWIct+o6K!d zyJIiW(WI2f^=?5D(HZXZ68NJ#J>n%&qta?3E2DYJ8%%T0(eIvr6E>9hgV?En1Y8;hZlv0NQ+3o&*HQ?|y|o3os1Pb9?o;gH{Mv0wCS3s4FVk z+sGItktq>d2>9tRE0@&w^z=(?Y|AU2uC5xUq>&(4_dGib%fRrkXnfXT zbaqs@L`;-^l!iVHYii3Vgn!*TqAUXF;D2}y4O_!vNlPzm7v7*@w|aQ-)g#Ok%uSE& z!@``OKOZ0O2P zk$kVjAVNWJy+YUfe9R4A`?x#{e%*shHQ?v(L}Yo}aI!P&U2En^d{HR(WBh zv8syh!ATdZG>o(lCzKgSY*N({QdVYbCG=U;K{t=CI5DtZ#4v>6v?bO%m3@BqEqzx6 ze{NXKslfQzf#O-Cz(=O5c4yUXmdx$h!e-F?SVLF8RNkx_K20W$J;J(}I<^54uJ-VD zd2JC&j6$4DiP$a_)#|;k$9+{2UC6-5$nDmR3mq|w}bNF!Oh7T@pzpU9_pZJWieF(H6O%V+4_1~9FF zU>>$GILMp#SJR(THCR?V$43S28@|Cm?IGgBSYx?bl;BZT_@!l3^OnPTJm`FcdKg}k z24VC&qcu{9>m>vMlB<==;ooaI>734giN)`7WO|zw#l-qC9xJ_~FHibGMA}!?R@L2w znhjWaFlBWMM(Gx`YYS)Ax;e^u>%Dv1yEVoKl}&9Ml2nmA!&G3g*nq&sH3KqrGKEKf zscui}T*#CAb*Z^UI^`HZk@Z4(g9!hMET~MBu^&Fr;K1iSV$}|Y+}zwx*r-JhgSQ5J zmLvAvL3FTegSQ>mkLhw=B#PJ%qjScJCcjS~&SF~Y)Bf4~?7K}EsnTH~E$F{NM+$KJ zL^L2G5qBqF6qWCGj|}wnU07D8-mfdstFORu^NPuR{Cs;n%GV>#^}(c7?1!G2^g9;o z#D%G;lO!>)GBz3zdX~inE~}P+``|VNDaR>G|=rF*`9A*Ip8%35Z8$pv$PbIzY2A8byaz{ zcWzAHN5YSar9t>KF#d#kqT7d)8Uf_Ov$$ABb;`*#U4#_ZX3JaWJK`INX{Eqc>@k?) zVP|LX>iLD!*Y08Z9^pfY((oz8G2`_YTy4*NK8Ecbu5`o6yKpj9($92tb!=qV-s1IVm&rwtqzLd_I@vszKcTL_gYDH)OEp6aKQ}9c2_b*>aO`^jN0SIgFwMY z=LtO@DohUPsu^(AKRO~nm;%5<+vjoKCt|nl$(<=Mimf;3-*{Td$NuLtE%j~GRT+ei z_nary@QJjm+0Q}{5V7zL31H}!3Nkw9jJidtw8WX2(_RLY<3jP97eE3ENk­VR2l zTF5(Jj`ng*Nt4qn-*5G=hfId7SH+M01}}w;Si*=&aLVafKIowMsMf6|IfX3+tP-@6 z0m51SwmUvP?tEKYd>#SF)!7aAq=xo)=P#U1Oi=3&6jIzS}Z!eeUczJp5 zR544Orh$pKw?B^ZI7=7Te^j|SQCQghw3B?uL0P6cI4ScRNrnw(lTO2uSZ9auQ#dh$ z@|p4(rIG9R5``u$p-n?sRnuTk`ZcjQOnhDFUqhaCvj+#N-!F9aPNl$OMGV?F}n_2 zHH{>6z4o-fOub;nPpgX^zb+~*ar5_N5SmBB)1M=tndjpxm?3k&@Vx>snPBWw^0N8V zzX@=B;?$qgtMina>27W^6nff~g^r}CphHJhsD{30yXQ8uq2F<0j|MQ9JeNY;h*v)y zhZ70jpj~6~Ior$L}E{h~ppHa5sDP8d+$qw0?fdQv^YGDFCqy`|N zfg|!T26UnWdMYp}2EZl!r=X!o0utK^Aa&@d<0v6He|(%&O_vCe!qP^)ng87OzNb@g80gjclPma(EEhEA2g{Qz2aahh-B@0}U$qu5(R+NfS-5Uhhfa`tk3OqRZE!@l>}81yKrLCp{*7nhTX#HvRl5f*{cOOsEyL zQv$w|umd8GQgq3iVB%kJ8jcyd`D%4bgG*G#fK%3`n#+7ZJog zGcaJoUAV2N;T?!s4RIN&wrl>Duix!G zq+#y}KARcpg@)k+@!sIWPu`btyg>V0ktt>Mhr#4xuE_H|ZweU=+y7f)gya^?2j=SP zwvwX(k(B5tk~BChLD^EZn+hBCt7Ef_BRAZEcS`O)l4q!spM3R8D&0my=o%+~RC7gz zrs)sqv>O|~k2*|?Zt`6Y>9yBkdFSFHzT{NROAUJ%IC|W1M+?n<8PY)YLI=dV1JT=a!nOKaldaH(51I#uLwh6CKQE(_ zJ|u3GH^&yB<5{@dRQ;7UbExQC2J9rZw3IvdD%3$t>@Db|9SvG76x7uCD!n-C$?HtRK%ea@zqmS>19p+)MT%z}h1S%Frr%Y!fwVQ?%^$QZ-U1ZM# z?;i!^|88tAWnyPz!A(=}NX*>P=iPk4L4DSiuJ0dK%4$-RzW4s+>(&aH>tqrL`DaT; znw6v&jv94HWMt>liyFBEf9!RuE~{vquUo>~!gYYiP*2@PQh+ z=1o2Q;;(!8QdPLL9aV?_hAzm;5GQb~^oMY45Xj8b`pIIl*CQtA^Vfy~i_(-M=_@tj z;^IG^e!m|0{?^su%xqzUuC)SdqjTmzNj!PMq$2|0u=?=ha_!Nrblwz}WxY=K32gmL zx>1BzCx=4wN5gB;D!=& zt2%Zljk1p*?*b^h0FCz_Z(V}@bSq50_VTBlz}>SvG?7k{R^~OWW!}>SQs}u4k&5Z; z3gZ66!%rI^CSc9_@BXhm=uE(}Wu>K$X~-Kua`C5T1)c>1Jv?*xt?g3tDP>yeU*7=2 ztv*4=U8FvvdI7(Gc7v^&l`C?mq{C0#A!0I-y@I|EIjFIz^ZNVnj@w?21HQi>*4WV^ zs6bHKRG*6v!|ApdW*biV*Sdrwfbg9hL{bywv5d#g;>Q??KkoyW`S7P1xWKD~4X;j8Nj{T26iJ04HahK3Q+(#$AS z{CCIGJM7=iWCeO=+82_X+*E8F19=e{ktVVl?fCa^prfkf;a*F3nQ)f+O_^We7~ zWm1|1fX3eKb#-;WvxSlLCM8j^`$_KFFGDmHPg7sI|753GP?}|?nkEDlOn9*V`IYMp zXUQ)**7Q*|43v2-?VU{yicq3w=p<1Mz@Hh=OJ^x3NfUa}+5YVrF4!7Y!4T65CAQ9e0TR8`8` za>x0kPe^@sG~z&Zot|Gk{JPcRQkQ#8=DvSk$jYPIS;{sYnwez*oa^uDnELdXJu0pA zUO=@Vu?|3MSK_?M$!W=#WAj4^*Xj*g;{y?trC*N$;W9wVJ}(6cxbc)onjz4xEr@V5D4j-|Qt+94KI@b+ zE>73N{N$lxU%!7|Me^?p;Q3GTHsg!@W=U`|aiSB>L}62f%0{)#Jc<%pSbCjBpsAj3 
zqYD_zPcU2c5=Fg!nDw9HZ0vi3RKy8X8#SsC%~Y^ck_?bprm;?^kVgT^f#Rq0#mCbx z-rUBc1io!n8jPKclTscpApapJXU6gEg5hUqD*PIGSSZLy&(O#JX^Y>8xrpa3HO$^C z0h~Vgs;15+&A#&D%cX=*7?znQncd(&R8~Iw7XN4&nUbF72REJxzb|_Ved&HH6T2oz zIj2T4on#UrML>-u@8;bs5TOaAzs*wiC)ULX6yb``%U}^69Q27p7QShAm!*3Hq?XyS zPIFXKDd@eiHAV7QsQj#Uox#0{VLL~Ul%-5J4fHKw#U&=PkTHTVQW}VY z7|a++r533!VYB8E>Q8RxirsHD@SE9A!r)@R|1HZ}UG3rmhF)?ZE^Gm`RKXJuhIHOv zdp&8&Ojq?6PWlI@tqy({U>8lt4~V=v_NMIpZfO{hG|vSfWf_}jP0mfTI14)uzP(+0 z%~3SvfE9RtHPuomOIhKMxQwGUtmmD)=Rk=%76EN|B=z_b!&;W?Xx(T3Pmcyfa^3}W zNa95r-07{=deQgVK51HIY`AF7a8z?t_nd)o5jnJ;&8XEs1m@~_^XciV?8T)&<2zm*5noEA(QX} zRAak@rTgprJ?8PG#fkc(qAP(6Xv9ng6&9Q`!G5?J!7e**bqimYs@qvuEX#Y6k;~mb zoy@B_zQ(*>m-_mnv6bcK&P56IQZ?7lsNLgcuKhij?gL6RQTC8AmzHilQ0-w5*8zN% zb$r{)Jh0Ypamu>x2&E%1oRzD!&i#;@tKPGjvwK*{ouU0CueVn|T*e7}KO-b0abKHgUmZ!@R7Jo@3`2cSmNQ>oCL~dpM&V7F-XN^|2q+B|TivT=~$tzW}?k zo*-zac3!^N%vI?KNmk|4$<)nB~bLprQ1XS|Kn|UP;yGTZuTfs^qB<`@&d|KU+QLw|ZDCv|g267&cgDe$r zs@Qb7R8BuGg(Ik5Eh)D!C*EXrL`f8)yPS~E`|;1|y#3JNnU&2qSEzg?1D#nz{q!BC zoJ9s8X&K)F;8is42!199L-AE+d~|r-kwDuX8iIqdeV#5YY&vNW@ou=Q_NRqah#e~L zuc@gv#qxvJ0}scmV)Y@u5&1AaK5?&(0@wt#GO*E(J$S~R~pZLc!*w#7K@Q_cvD z$h*BQ!Xcy0ZL%3PeyQdO7Ky5eyCn&!k0|S$w!Dj2eJ>s0V@?SE9(+wL|6<0cw$PJc z$>(xKHMT>H%7=EO zTu`lZ?Znp*M!a@pE^+JhH?^l6HPi62On!u9rQCp9S@HPzmnU($<@Wu&QPD3z?6S#21pE64 z#V+NOm|aQF>t|0=(Wn!7hX`V4Q@EVTVWZ1yCHshXgIqB|JRe)zAb86-z8M+;ebg#6Or!6_(WHfJFL7Khj^6H5h7Eo_T1-Jld)0xyOSU0=7mFXCXLp6k za|C=9P&=sGriTCHF_mtHWybDs8DWRTbzL9ZURgyqun_`><~u4AsOtv-3w6hJqWsk2oJB85bp-1 zAe9ei%JJyeRrpAB{}gzo?3A(&8}8tvw?l7)%E!6-#45!J3r-DM(z{02F|B*~(MyK3 z(S#G84NFmdMB#Dc)rnvtyH9(DL`00oF%|f&&N-_ytbouXu^|~2E~Rf%iIRS=DZ@6m*u;668d8 z7`QT*_hr+F%HMqteZiEysrBXF0e*3mqdF)%-ZJ4w&;rqDt}AHd7J2UAH^q|70~M5A zrXq~2@j>%TzeOFUFQNO@$kr*CJM@UKX2qrk9rWUATjI|%YL+BoZ>cz$=gy7;AExml z{@BG2UW!~A9D>h3&jklK5xrj7CdUI2wC0S7zTDx@cs*M*EktFvEcpZAspd%>9 ziZx21;yHr1X;%OU0+m(#b0*|fHvx2mECPGfi51{lj0q+PhJVDy?8ZP3h+E;5x61;( zh>H0=a$Y;-*%D81U3A4eXY`v5Tt?^a13GH!H?_Bmpdlt_#sjUu_IXc%+zQZfZW*-$ zkLuYGZSz$CR9#4M0F(`xWNWioZp+vQmgt9+au9woAXFBo3E+2R6NogWm*Ct1po91_ z9x&Vo?khl!t`l=87v3%x&>?^wxwxPeFfE0RnmFB%OABW|v>7>wm~WzTV^2OMaIF3B zE9!8LQMU6=>`F6&EP)-30x=S0m#Obqj=IKn<7@kX&;NTu{UcS?|D-5>%Yguy0??6p z4*&c0!Q_9V)eY+@GXM94dYCyJilS~-e%LTJ>S-=>MMOnNw#H4D__=Z{EF0LPA3SL`UNV2?<%o_4^Su z*)`;tDIdQsNPS*ttC3WWaP3}K$emPmRY^#yQ)zLw6xVfLdz}}$BqTwCBqZU{BqW#D zP2oEvBz}@4Bzra_BnqEMNZ38|nhX_5NQfy|F^PObpd-S!R%s!7{^RTVs z&TI#Dttp2Z87yWX0n6)W2@$*_kJ7ov%qa!V5lE9K`w#HQ79jC<3X{_2^&$TcSQgi| z=~{TBEq#^qzdPp$sH7hc8TNBU3^Dx&1aNXk(7_N|fLD$t(*GOJx{uSy|KHP`J0fWm zUw_#s1j#N|S=oE!g{ z2fAM2x)olbTsbu2n4+>p?eJd!w+y(Z3yRW@7$W^&)LzqYP3CkB zv1KY-B>z45zd=hyO1pW=VtMvOpgH%{|KM&%jWP9GG*}b5+V=XkE3}Xwr=|kNU$KZh z2&P9weAM87V{&#fbzPiT;Lr<0`u~?6hm`g=sqKRwc2BLJne6<<`M2FAil^uDw$?Bw z_I-7!XSUMqe#JaS?!Iow4ris=OB& z;Vj3I&}YM^=;J4|$?`v%Z{l6vkNEAp5J7PhWsXp-I>n_RX ztSnBL5|+155N>uYqcd^oIHznZj3G+d!;7}ZKL+~U|6p2lh(>&aIaJNpGFyfDQ7lk| z{m#b-2%E=upSABynpq}m@RP0pZ*4j= zUV0(owtxL1w|k_|&y=Foj-lpB2CvSpQx$4U-tBmBD?VCD*+VX=FZva~Z43YTq0gL> zd$90Ma*sMsq4eFpTIU$mv|gcDY0qJq(X^|&f}ou3H~+;xElVlww?BXeI`6?)47J?d zBe(rxHuSm$97H@fRNNh1IwnoD-47J4IGf2_eY{y(Y~RjOz8K*C2H1LW+(+Bk_H(1O zL7*|eYE?8qBpt*<0Xrh?l7rbts7wN$r;T#=pm1%T?SP|z)o>OCOZTc#KvIugkpcBa zPgOC~F$}+~(vH$yv1ZbjbY_fOLYJx{b%bx4Yr6K@ zQU3DWvQ>SzlVkJUIFwI>#m&az^DJki0unVY716RR@}O; zg!B@I4d6V*MN|g}Mr~1xr3~z_a z5zK63h578Wes}{HtZR%U#9gkL8lsI4`@$SIhdQQMB-ltMrO}8*+9mYd$Bfs+TrIMa z^v%1-kd`UlX4Qr7pyH$dl2|K;A>>xKVP;Jknl09h zu+?0v`-Qm{5?V+C z;&>a5-jsk|2!Su+J~zG2Q;JU#cs6V_Ip)mT79h;w(@aMFDe50}z&angK7S;}bf|%u zQA|QCe&3;xu(92CQc|UeIY1`l%w;%@hN8a$@I%>cCmYhE;<*)`i%}h423LJSmY)CK 
z=6Wtgk^Z{z^@tkh9TJ@l`aMvaQ?pdHwc^C{|Ae7OnLvA$-!3@2qV=^1!3>+`I|@=V z&vU@dM;XCv8(*m9;ZC<i5H6u<8@TBttRxsN4X&a7l0 zTP`P&eK?6zZk(Vo{?0c|U&y&ZiT!Hm@pugYykjNjd7iR>YQ>}k)80(%>lG>pM8fE2 zEPSq7u>{9;!Ei2Z>O#AFtdyNhihaHK+1qeNq@4Sa+9F*4c6S8`5;GeZyXaBoV&G`~ z-*VULkg1UiR-I4Fo2l1*0=A@qWsD6w40DM1G%MVGw?8oaa9#Y*X>jwaClHttRP(a* z=b?C|QFHOO)t-6~%YYbpQ^pgDO9{0JYDdq);M-I0tb~*+eviJi3_~IpJYc8woban9 zkD2%HJimpKO03+BHj2$1xtkVzdzP{@eMqZd!7&$mRUsnMX3!r)|3fR*U-@VVKhp&Wr>fc7`IEB9TpzHRnOWA)9lA3T zqM@Y-$EN5$5 zFq_ROJ3IX!<Gu9q-$RfA)S|2#=2ZA zI}~L6_mrFc$j1j-moZ_d86Ks5vZ-w*c=9u;=}*7w-iDa{9p#1+yBHUJP=^TI*Qz`* z5KWWG%q!d)l(_9dME2QLpX{Ww5K-)KLPsHf2TZ_2>FimQQz7?LlqInL47Rt1iC0~K zuzdva5KuUO(OvyjsH*vFYr(x0BvR%X5>)wG^lCMgc!nZw7N!;`yuk(?r5BrqF4S`q z&pe1n+t8yK@MSrGh}S1xY{SprVB6CM0i9)sjP@qCdi8lJ!Un3Ad%V6ODmh_$z+WVBO6ham#+#sB1n5Mi5`+e|yMrrqpya)3E4|@dH{Afl$nPRp@Cd zvTZ+tp0kqbqfZU3?_Dvw13dn)q+|{ijA)&)Y*?Llkqz0)01yWd#DftKvUz{xiZ-$S z9sPO56q(EkvAx;X&Ujni)HI zHAd1ew&sw}c>dDCq)qDn+tfsq3$%M>h==5CiQa72lMSlwba%{5a$x;NS*$~1kklJ# z{E<$pSHiP-*Hf)J3NXc*Z~>M_k5}g_oM*Q@z47DVWwEKSWE@kxCIJ0#=_`1QEb?+B zp=B~~$b|O%jbya!I>+Z|+g}6Jqs6ZdCd=E6G$L5dZ`u%JEZ2OWt;*xYJax*#GWa16 zjSG0Bp|l@z*<^lnX=9HJi!(u_@_#%Pk*O3*37>#Fb;en6-@b~O7EA}I?Pypcak*6N1>58jgT0hK;7H$K43SB9}l&*mkVB#}axOyNu1z zhaNBzS8hNMfr!I~C7&xd$ToXs(jJ`#a=!|`A3=5e^!^$=6*SWf z$KnBDSNdUS^cC29;t+!HBSwuLEV0LrQ*mmP%{E=@#kj}nNJP2IG-@DM*D@ZezwEK) z*4D{n3e9=;D0e@-#BQktofr*l6#Q)PPLR6X%lj)a%jOP!=l6mKYjG}vFls!Vpw$y4 z4IQI3hAQ>7P3=4cPPPZ0shwzj%qtG1Gx!`tC(9G(#DP)Gq*p10{5@k?q|xp;;JEu? z*P2yFXiRpxBN_a{nD@;&eN8p){<)jCZ8cf`aAG#oIi)S&yt$q5)$~0;?P0lHfxp5x zBh{D&0L%=}6FKpu5VUpL%syJE%3}5^x1EExU%q%N@ z6(hih(=uRW3l1)EPd9ORfWX0$^}$2p(z*W zv@P=TbFr8?aM82t`cL`zs1|s6^8>-C=>?BwWD{rDSuOM(YG)V8dD$ZYXx{2wsA|4A z%j zr>go$T&8+b#nZ=-QEyU4jo7r`*cF3=<5TV#KQb)wc^v)M(LI1e{3wl5bwK)zzT%0g z`O1U%TLwI`(T)!MkFA)^&jP++Z+GKW+Nr$lof?V5QMeFj{1N)udU*h?@#7x;6B}6H zKKu)h*zH4T4{9lwQw*Los(V2hB3q=v`c#w6)=E5XLXzP9$HFtn;0<6LR^sVmY}sF1Y_9*fGGe?ZPN?H(}4R2 zntv9zU&}$ZdKlY?*OnZ7cOP-CrgkIQp<%vx1v=9b1LC&>edU897}L~DGnwlV>>?*ZE<#32~HbGdoSB%)6_gn$pe zS)XL^VfR~w9SVq!fQSt;hP?3%8ph32{CyVo_GO+3wX&bB@dtBV5W}Tl@j@@%GgSWafEum$>DYf!k>`YUXw?FPo zdr2I8F(>lLeO0}R1qp%F!Seilx$UFUmNp8bt*T7Qr|wnhzJdG!hn$A{-B9#KX5Lu% z9=^3K8lGI&(c*H=Fi7a2HUVo#S0GTWM{ELbddHZSe6x=qM_`K+1jYgU@#U%yP(J z?BXIZ_w(aF4Rx*$=ECqYK!jgkYYaun-HNdNb^R~ChouW11Ox*2!rlNik2%PO<9bp< z5A0)NdTvAs-01phCf@@@cp=0fa36cCgsDwTtQ`+^t<}{rS?gqKT%jc@ zOpLQZZtrqeu8&1n$xv zfJX}{6UKs3tyd9L$g4|Wo9M?WGtohGDph6_6idY8np+CihJ{4rKP^h&NCaZ?#vBBT1i zG~KGGx1YOcw}1@xl@G$tW0BDKX6=}Yr}2<`#`Sy;sLWW%iE3^9vlE-^uAw?d#9^yc zkX{puV*+JNV#MAVcpVhs6IO^h*;N$L=No%h(K*_BrU{x!>bgQxhhf$k1t+r)6K$vde74*0+^_jgoKbNV8NkW>;=HDl9xfRRxx4zG$JRPl8hC8>rHJG#Maf5N~AQWu74Fo?r ztSVH5kg2UvXkJ|Fe{Pj4C_Kp3f4Zh|N6xF5P8l1_6NV^IYCcGLL;aiew%g^(6;I=4 zFFh)3Wf>4g)c28fM1)OnH!O!}d)X$ustDf7agueN^~5SQu(NIQE7#ih)sKcErLoIU zM6AoQ7e)*cvU)8>r#xig>#^pQt-I6Eiwo$esHlBvYsamLo6PSgrz;H};9E9k_G;2G ze$}NLV>^;CB_k zbS1q_{8-@OalSqd?%SwIf0SGK9{nzDOG8%fji}n4p6^>a-F3`9WO?O=!uFJt0yl^! 
z6NQSN2oR!r^)m`e9JNvm*?E33hmSi8Jy{1d;Zt(k2z3$#V93)dPW%?I%^RaT6S_HA z$e#GKgK5a4o1f&-nL&8CcAbsw^N7+KneJlJx9^74+SxG087$oC8CuNT-P*RVAMYha zv$Jh!6EO(lZmEmMMOPfS4m#t7iDW;CE_Z|I2lM+By#(=Z>uI;6o{%fibT^L~Kc1ba zulTdliVEANLR_J3h~YEI&jZP7EK|XRjX32KKF^J#zJ@65ta{}w%=TfqzKatZlExPig1L$L80bCEUGH> z9fzY~TSfrlPG3RDnpxU{tA8QL`ZR(u?EG35>{Z)qNAWQ}y;zuMg`Tn_uKEFOsO$Z^ zepq7ubMR-|$(#iAGNUkXZy^8O)DA7xwO`SSB5VjLA0Jfa>$px%za&h6@dxJDBT_>S zz5w1e7iC~VHAD%$ zA_hbpSrbpeKD3ykfPi;l#~uWv1pH)klrWVVMglOApT*!&q5FhjcjEXq_&$q?zJ5|p zlEPH~7rk=-=Kg)nAubHJn|PI%_s_B_h_iFQjpWKa(m$x*PE6%|U9nX$;os0|$Y^2nSmjiT?k z^PTzU+`<G5&>sPN?5Dv-FH2-M>~zjBj+EfCG7h0 zf!T{V^9H2Q=HuTj6uO5FcW&*$I)7F5zvD=@jW7E`KDO<-!q!K28s=!QXzLY_Z_&Wz z)B5|&>~;68(cd#HYX7?2)%^frAjG2n97kUsv+hb7XAT8h3OlHfInP7|>0~R|zas`dd7)vG1Y*7zHp`CG`MgNOJT@bKgrek z`@a6~9T7;0&?9M76R{En#isg$@f&d5fe>~vbla_AIj_!ZLd6=ski3U{S=-~agRb^~ zZtE)_{m^e?pXbIdMM<7lK~JI5Mzve_6Q^9S=JbiHN~l0Af^e=Km&z5eQv?Fze^5bO zLk~=d7!1gxX}=b5u~!RX2>fNZqF&A!=!zjK;^MB=UOW9YGpJ(`L^PamFh)~BL5hti z=q)6iOQ7KUvIpp3o*;B6BmBApeml-8(q{A>Z$vh{g?#m5@5BvW{CAmojyaeXBn zf>e)V4B2jm`eG_}%-c4%u6@w;g63M<_hNdCHI>drY9+M`0Fstfvo3+{C}EoAw;PVL zS~4VzWv!2t8_;fW;blMh&Jr69F|=2wtPms`>m_7YB&VVEA~$ zt90Z+zfNht98{>_%}DI=Ed=E+Mx+6hf&JVGufN(plXI>%FVicOJ8oBkAOi5Zh|6h+ zf5jRl}y zANu#}i<2bz-Jzy=9mkgwdncb4D4a4GD*FC6O(^j}H$3tt7-;d9J=Skc z?8DJMQ-%J&F@`Nn3OH-QCbl$e^7ikui%*Qoz@Ha*Ws`q^cDhG&N7330-U33ZQf{AI zn6XidtJI3FnKu;B8VBsjSjVg6j2j}Ly}xBIw9dI)YPH61#J;6yRg_7q@iV#bE7EC` zTT%UR+AHx^F}qCJOxLlAq7GPedw%=Etpe^p(uMf%5KlDZ9hzfH94TE@_Y`cY%LF>-d6REBiZmM|N7+zsAnU)PZyZ_WN-0j9U0X?sFv`QeAiU zH_b-P_@|(}i>usV@@d6KSw)*a+#?yu+U}{;L|RBVDtDWxx`WL6_kNGG{f3UJqe52Z zAr_c2=w(!Tx}od6Tl6Ej+q_XvpS~L-A=lP^`4>P~3C1o5F5I}@?)l)$tx@8sb=%b; zYB3nAgi?`4g9){PbIQQxlWgb_5P(|&D5KkNAg-1n#B~JmqL8>NgbG84{lj|@F15c3 zJS>~QFQLLvS?2D|n9AU`vr`YkHWa@gt$!Q%;2~`-PxIkCCr{IrQd|M3TJp_OH9iM? zx$XhKcISz6)`VSs;%OQ5iiqEEu_jK3Vp{-#hZ%hg$*B*XCDyc^TY7AK#(k~cIvZtqVs3NKoCVv|kpk@oqie{Zu z4UFg;dARN@@SW^=%mca3Cqr2&5;H%nubzBF|DL?rS~00{_so{MTDL=H?db{K!ULfv za!;QOs4!o2f!2?M4~kp@pg!=@925O2M;$QD^pgI2<>OIt81=K8aV2AZc)`x8MBZ-bDO6TkfuRh}lU&^7&6k zN|rt=FYIQr*+hr(;yiK$JRiXV>vJ)=PqV*s3^D>}DC~|JK`B4q&AjNjFalabmy%}? 
z=xiwAYkj?Jw+?AQUdy{-^hV~`>YwP&wv@B2Q7F;~yX1ETgkP5Y#cX@}^Xd>jN`&4a z-76%VIB_2;O&DQ#vDazGkMPTK_)nIXr&vE8`H+=d#JN;|^0;h*hLp54)&z=&Ko$d0 zp;-9U41BJ;O^viUP->q@!1mH}F8lS}xMs5XiBjBGk1$keF6tbY4HvAeDubmbbT5ZW zxQ7>pT(t(yrOsvvJ-6?Y;9iCxS|QDrtV#C}2NVR9}V zfH<_%PCIqu;`Mo-(N7YOCvnR0`7UXAg9hNJ@VbL!9K*O}RNRql$ z8)|G|4qe9k7-rFe-r`LX*=0^zhprMW3vMlC2;J)-YA+D1P0=Lt&EW+T^@evuu>UG= ztE`@vc;M(O25Q7tU~(44;)D&MDQ$b5Hr{wZ8AZBM`NVsTUX0e}oaEDL#>z`&R_XTb zZm+zbybfjO^9r$LjE%P1(Szb;_}3&SVwfJh(sKG>BnZ!_wgl9S6()X=YKwmJ>#0Qd z8}{sPkB&=%XFJgW^=s=wlruLS6K#BrKintYDZ4c8SMEZseq8OGo@JLkoSF!Hu(9x> zs*oRe^}dV~(M~a1l1O5eA&QTi*P3oIA~GYE9`xgEamsJeHpQ(N0i)mnfRsMugZ z*7}ZUhv(-iKZ9@2%&O3-u7!WCR*n6A=GL%y2^$S*D=v-; zNL&;G1nn$S0a`dW2RbJ^Za3wOFD#hebfP zP2k7(Cp&Cw8>J+MaNWh~XGt3s);oTCBI}H#juK!+_h*z=$@g@z>9W6@rP)gFnDJ>5vtE5!l1>+_m}{tpoC) zDzNjdvis%V+!>#5SBrUb`>!^hu;)Ok@0k>y;4P)=8y0siTIk|J;|D6<^yAq;W!C#h zi@fvoY{*&g`zl`0fFrOAa$L04XvL>0dszuWhG3g{6#Wl4tk~}F*lXdPz*lcsGw~}_ z(1%%*f9fBHoQhbhxL=t=al=6{vXq?HB^Q?_aIZrdAmQ8tIzBfRr-j_c^7N1VojaNB zJ+6^<8s(A@o;?{$Z3E^;dPUds35 zsZe1%FeiV%$Nq%fLfmHE%rb!rKzI&;e`ae-=pM^(TJ{>IXH+_|(Hjw%$oQ(1PY;5) zlfqE2tFw!Xz-dn=?Z69Z{p%QCjmBkX<{z+ zv;5;jhh#B<*HJ4uqd8O5WbIe9ge{MOR3|V&uMP<^XzAURkVE zCB^*Yt-a*KI~9VT8h@x9Iebr!$=rRYva!|UtZ3TDH1K0cV8fNBu!ZvX!=saA&j*%y z!kGh(Z%-WLQmS0gUD>oaDUL0-4cyP}s0FgJz@L)4C*KV6-}8H7Bb`Vo1PGlARv4Jk z#uooM?&(uiW_Qz^UuhoEwBiK`EFwJz1{|%d_X1Nt!T!~WkegZWLqsqw!0T*C2Pkyf zc0N>mCNERa3Y0kQlsz4}{p3##Ca+*X;Q15sn=Z`;Nz?v~B`OZAw7nd3kn_;*(p2dl zYVWCY_#aOQE;PmCH%4#T=n1-v=kHEJqT{kmsd*pNbU;2(sTm208B6YbixZ{%CUCSj zS@K!x?g~kTFvr!yaeQ3R`xhWI2B^ zU#?u}^WJLcWgoP0y?((1ep(N}?`S)3#?Z}OA5JNc;|UwBXh0mtNQCH#R=T|Vb$#wH zWIJWSBPQjQk3EYbaQOQf2yrQHY)(M-f!*BpUoiYsJ}yfAN_kL3g=mE$c+x-;sA|SA z^b+toD*`9v!qWyEd@zHED?%Y6a5Tyq_qV}X!K~voWPK}VfQWB^!rpepUi)WC`tD0j zk6SZa9}v3$1ktC%1k_k$s6^g;EfIYkz9EK3{z(nRscWYkB$9I<9-e~yS@}jo_uYV5 zSy}ae{)pyQ?8wfy?qBGK;H3M%5;ug128OTX-5ec`!<;&NZ*|k|n-oJxmqpj1-Sy|h93X2}!M)Ua_(1+Fnz?jZ<0oDMaSCUU- zV*jM|FsU;fjjO)DSB33?fksx74IZWP{nlBy->*xf7)0WuINPI1-Lq~NCq~jsR{z6U ziNb?)X+V;plKX+ZgqVfH+yRhh)uQ>}*hn&ek(F`#y~=-rksoVS$ZC{x|{Q@%FU4eUO!p7CASU|{_pkVB>4@7>P{q~6iXce>HMhhn2XHYiP=LmR6%i7| z!ILUNrrGeJK=W86WOqtEcCqTTJH)2|LJE5jmCFybq@4Sgvay&l|M{nW=voFZP(#0} z^^Du*{Q9U1$(tb$C>8{QAfqYo#Oj7%IIWvkFI~XcIemfG45zo(Kkaf_UkMgZ5HBUb zDC8YENZau>O9dm8uy9&6O9+DSuNkHN9d{l3AsO@5F-z>RtCBcL_6XZ<;zXVA2(<<5 zS-5?w?AsZyC+vX<^vc*M2m)63`_{4W%S}Hp@q(y?t^~InR>rxvoGK5+wIEA#`4DGO z0Kya9Vfk*F-d?RUWZ-2!stKF_DZ~#md_83J>XM2S6-C9MUWeJr_2@|(s0!WOFVnxy zUl1|dL_&!Klp)ZaH471d@x!)ZBFvGE_%C}9Sq-{E)0|xVT zNUxNWe}1oaMRUQ~t^rLh#WmRS#%}( zy)-L?+~gplaTridp|&VW_#$(whN>$i&8G6!LrHam!x%u4Si5$aG6Ngq^Q>CF!I9J0 zz`J>=rdx0PH2nC+drX=JoFe8rW*-_P(zHjLntWg@1I&E-;C0mpd)my`;aNbrR-&)_PU30h7>lFJN95 zmw1Z1QGJ^q@>p-9SM2w&RL7!lmX?{VWa?_#d;<*`<1|NWY%Q(I>w7M9=lM}J zt`Z`Fzc&~=hT77m0KVj%daS8Vyy0tRA zKO6J6u}(&#cRNg^0BcgOKQLA1`2iCz;3N<3Q?WxYIR7(wnU$TL&oVV-Y+^FFdCIYw zU_`1=>fdbn@Z+?x3BRlRBZ(rj8OPfG&Evm+pBfj+Tj-HzP+rEpVZOMi-TpgL=;r>y zR4fr6YPmg{-@zgzSjr1t{x|!|QXn1p=UtzmK}XUb@yBXfK|vz=g}>w>yz zFFx=7SktC<&hRBBNa^=5PwjgvaXFSqLj?->w^`|!La&hiX`BsDMqgk2eKOJbJ>Ym} zSsj_R0{Z9OltXHnyjn?FU$a+_7AQW$0tC8U9hHMz%1Ir0A;QsPZY{GZc!T5+*4p(S z2l+0Bwu3nDCk(QnP*T6Y4mD~)8i*f{^4D@j@9VG!EH4e?GKNNTy~tk}9y#m*bNp6o zwAoGP?|x8|aB0<7JBTxJF2ykxsi*a1$ia5I)qk*85S(Aef0CA@DVrv9q%vU(^;+?q z=g#+y70O%nG6`5@l9E@~(*C!D)^Ci=K>qs!l^j9yDyuf z(vU`=`+GlUAKcS@{Dr@6L!Nf5sSKsLmc}Yp=r?%fk-uX{v($dFe)C#JCC3?6@CT`b zg>NEKn<*m~ueR{z&%)3kTgFbJ-WM@JYI&;CPK}%@nSinkP-_Bi_f&`Oi4ZXC zgY})`?GIW6pk8pU-@vx!6{Fwk8k@ADXT(Oy-%r0{gvNbGZZ$=F5?0CXs#vlMzC9M< z`#dZue2+t8fU_sA-OoZN@Q2iHh@@mubaEN*D)0Q#zopI~zW0>Y7Lznm@jKC7{un7M 
zirjWZotxHOa69jB-{l-->{yrkLN!p9Jiz*P;(MiVS@z2D*(b`pTok@7V>D#nXZ00D zZXMTfu?Nncvo+)XY3t7}67Gv^fJ0|18Kv7ZW(BhZuV9FCMXctA-kXZ)5O=P}^M@+r zX7bcuibQN=$O#>ii81O5ez~3gx9BY$Y#z>+(^S(%M^b~9`Dn{nU`HnYxjJ`NhP~gS zza=z3ab~r49<}Y z{IICzzA3d~1^Db(hj2aAI%Jf?{-aHxOyIbmbQmGrIr2>T*tyJtk7U7tk@J^ozWn!U za&0lvPk#>pW@jCfj)lEzGjoyl^0*9rok7`XO+^5ge|%_>)9hijeJwJ)Z?9&xXUqbLz4Y{|e?3gs_2+$wo9(~&L7J{kGM3&i z7}5ZHt2?ot0`#eyWG_epK+e{Y&&&F@yCHvfPuqV#J+l#FX}mw*we26!V!WE^%3PY* z!s~x8ldL7((l(o~!JuNec(Yx$gR$K_CPaA2TdynJsROZTC2{kNNwq`a?d@_=I#b$< zPh(v=(X6ywdp>;Xw(P+-|MBQ_=Tz9)Qbs9?SH*N}YZ4v@yWedIJj8y6j#>{ky!ZDA z(rjUa%D4UGWek?#Xb(y;6ONEeVPrEsiZ#Le?j{D_X;JD2^D~h|*FrS1l?ot2ds5!( zk||xeg^1 zj5tXFs_^C&#p;a7`&(tynjZu51r!`S-BpD8IT-||-Hu|}EF#`EBb-n7vXtiUR|Sog zeZ~rJFNXRQEC=e3<~0XTPsyxzyPEgNHY?Tzd7lFV;fASCg8y+cnIG^x?lI=a>+^$KJ;(D ze&L%QIL-sq-K8d7_owYh!qv{VjU>@I?2j#9%oe zA6gqBD~4mUbHPB1Fp1)EVOGHgP>QZ?vWkhJV&oGyb?#VBE_YG=qMtORx^zECiqce% zDR`SVJM~nw$v@|Ev?M3{qz)e5@8iz)tO7_I>!?ydZawK)Ag$$lozjpDN~0?>EK(Mt zU-^R7EdV)_7II$oOtGV_a|Z)Een~D@;>=HAFyo9bqI^mF8uuc#d*r!W6TX0$Vgn^_ ziZMm0J(xePPKmTt_#B*HVI$rk!qg!2ii_HACA~bpo+_}G=8qv>Zu-DXGWzw4EmPfg zi=lJtYOj`CWuPq6$DCPMSj|)vm%m6@+itq~9w-u`57$Mz6q2`o z8IUOT_Uqb7#kZvSlMZGYnZD$OXvJ?1!(I^B`gKp!2At)UeM1UKjcumx&F_` z&*{q4&e^RQ88Y|GT#JTN>!!NYT^EXRCT3P@NvZIDU7`D3vZn<_uM|1_&cCujS(nd} zq{#qx!|MIzj=pwSYVa1Xf=%W`gRDONrI^d3XE$f#rUwRtU z-f->wqFGbZo7yrcqikggq6_f+%fh!h_M%*zi(8T5Tj^OZiDTLqQ#!SuDnc8>ZVb}C zvPT^88TOCsCy>Qsh3Nru@y;)YP#xnYw^qr`9_^8BGd~Cn$2Hm*(I4F$dE3ITR>;#p zznAmLD&jh4CbeLuNy?`}^w)dd#hlGW^InvVN&8Ueb>Ux9Ula~zat zZhzy;AAD;0;@Q^5A1N*`I^>DqTH?`4z+{#8!XKXOkxg0o8#;r#hkO-Ezv{KMxS2i* zlzM9!7p%ljR8$9_q_lq*P)thu0`TW$B`o-soqC1ZVD+##Q$OZRPV zuO8d35e|24OT%Olg+02vA!Cmq?K?C{&7qXJgUnV%IWpf_<2wf!%voscg^T@Srm}8z zhHAMOqfEyXOdgM<&94ILu8dRK;|0EX_h~7W$jI z5Q(!5DVTkz9LRVaE}5mu_=xCQkt5`D`$7y5!3r}vVk~<0ekNIZ%^M{@r~YwyMrDcq zfv#efdQl?tBO30?Og+aBxBE{@Ug!Qm+EaYe0gz(dZ4)x!5QE)(ClDbsH3wL76Ap*D}cTii?%zpDP> z)eLwRp$Yl7M$3!W!Z7mQ?67NVS)Vu@h~osdH4T zPg|v#b>E*dxA7^d-FF<2&J97T=Z}!@KAk+)3(PHTb*m+u-SHT0VQemX&G2@J>8q0L z;cH9pKDUC~`Xm>ceA~L43I|RVLA<&N`FPvbl6-o0+^OsdAL{!W2YV!SapR|w z2w#N~rsSK?Cwm>t z^yi1vS=s*Ep4*Z*S!*dLKMGP zIdWI8^&(9cmyooSLfH`GQ%5h9ds(KKu&F{utN2WDw$)X68dRe_-(22=QE@}H?JZlr#DmJ4 zAg>y(F-`qZez4cx+c4$tQGO^^u@_)54l`OLIK13X-?Gi|CqtL&JXdT?qKu{JE2v*R zTtk-G$OLF}>(BTpb)AuO20k4>Yy5sh^Q3Fy{g%+ZQ_!2_=DQ32e?OyhGt@uZ>f6xV zR{uqv{K{OoJgJGFMqh5#wgTumf1SXXkWlSpxYta_;dIA~!aJia!2l?7^6FlRQeXr{ zRft1L;ytEM7O#vl=%(fcDaLfPNp#1B7Z|#4-7EB=pko`|DK?VV?$}Ow5Iope(XsO# zIJ+5?YwT1pk?V3Y`1{?`i;}P(?_6OcHcWRan7`5sO3&Y3A1QqT8T`{=FH3@D=QI?T zdpTJ7sN88BseWsDj|PVKH2I_+;Z6I_-^RXgR=fz8ySL8&C|J0DB~dTX;1>JTtQtO1 z*{0ELe&TVCMqP*C9RZePGtHpjrB9X)p7F_gyN#TspU~6#jk#rt-6%6Cu?6aUC-4ZJ zDOSH@Q60om$$QhExxXac-S%fgzC`Tx#rE<%LRAnrdX|QL%dX~fQN^8K;ibqChe9RN zID9CGaaU++MW4b&C-_4tN7VfcD~r=py0+ z?6xinA{OsHv2|P5(?m?aKdacZH2)j?ROIT}&)CGDwgQ?{e>I&5yLWLA+P9&y+;j_c~!ivVC%C?fKuEOf>!rH!~y8dF(KuO&| zQB7ZQb$3x!*G~D!6H?_C+&|a6y{2PFbw_Dg^B!s6;8JV?mx)ATaWy8JoZHnhP*T&m zwYvAInt>-PdbgDK6_xdrl(m;swwF|Q6xDWbkq#D0h6=@_1>)gi=|FyEXYSdyrzArs z`%Nl)$R3JV0s&oc<&1W!ptdcyrfaKYsI;cPq`Gfw-5{)}rUx4-5p{2q^=^}QZB=yT z*EZ$XG?z#^a%)?2D(WAXHaE>K&jqQ+hCWoJznN-9irl1KksM89g71_>pHC&dqq2@gW0)Nk2z`#P9R)ja!HK#dXuAR z+yU>rbIOI#I>PJ*Go`)B^Tv7_g(QOKElf8Miks4W>~%i(Wj^F|&-;x#4>DW6;v>L2 zVlEsgm)>5%!cM{}mVbPy!8t^GRwYR>DgoYnTeq7L65&jm08UkyuwC9W0Xdmxy{xMT7ZL&8FIse6i|4 zr>S?+iWU@WWL3HBJkT*zD(&7TR~CtZ)sX@T9y5$Pup;qDp-5R+GXUEv8NsU*RQ2&Y zIb~hjYx`?Ptx9unDYmM0dyWl^Zm;jn6ZPOJ+eE`#Ym`_+z_!%%Z>j4m6!+yz2683J z9EobPM3W=a6)3d1b;H@`2TQ6(o>7cS4CZ-%eAXLjUv@v&qsfzW6*Y{M*6RyIqq#N1 z1tL|EM2-C^5)YNg28*Tr#qz!_4TE@CuC%YXQI#tj$`OI|trhCA317q&Wu0UZl3Pb` 
z1>|O`6_DCNvEAB-eZ}9F%md$`BalvR0oxRUW7i0Zh6#FSG(nd1M-hEPOpX+YV!ZL6 zx~TCO7KY=pLKG}SB`0-XP`NcKw^0Jdv5l(i;U#bPoV#fP0CoVwz+r{PeqL=oud<#S zwVqU&PpXz-r$)`E)s{0F>shVsoX&opS*6Yfc8UxxiOC}$_rdW+EFDpFq@sJ~oI~?& z?UH-K>YH=;mfZoHFNjPwYS7SiiW+k2{)(;gAo-Gydd!S=pW5O9wmWC-&69Qn>Va+S zQk~ucT32f=SV;(`wpVDa*iY<0mEI1k8*{=72jdk+d!xyTIDgl)cW{nM?NH-3?TDg1 z-4;OMIxSq7d=h05E+F?hY+pVUo+0-ebn0J?&93n~B~nC+nM~fEH8U^~Z-!H$#g}>! z(IiQ2K_tFO@|1)|9>JnKEiIM>B@AKE10S@=jQj4qJx3@~6XdR0KLr;ZAz%aEluG5< zR4PR$qiVSeT;gZZMNg1W){+JW5af&5zK#`9g9&iCTAva6K8IorGGZ0CWNv91LVWy)R~u%a#x2)N8V2`Yf3N`;#N1UCpV` zl-7;D)MZ= zbsd593N_(l5K2jCQBTZhLlPc@+5!>-0`%6QX zBg2-%u#x2>D)Z4%%S&qOakcG)#(q+5ft?w(o>kk*wT=p%vs&-28FSYd>|zt;!inG0 zj)6I+ddV|pcB628$rHkg0a1E1jFDt16Keyuq4WZ8+vM;g#@@f+=$dmhPFk=_h{nUG z$6i(HEZ}sR+I(JRNt%_`7HS)A%QQCZHX&R?PPtfbm5f>C1`C{Tc;5XpP87-O&3?4H zp-COhZ=}=0);BD^DDXLTCpq!#l~dT6@5}2t>96jSFLot`50J2kFAx{q;$J3CZVqqs z`cjK8lh$C&69l)`$Sus95cn!^I&G{@N@tggS+l>;rqU)dq6`JKM5Io{j8uBLv3?Nl zz>^8xxk_2ebPB`p?fibUGNT3vp%rl?w9RHLSdw5T01 zbI>lgQJ>vBo?EX&82ns|%7tWbBB4;3a?grO8~V1i4(BNbiW}5L@{v68AV3Q*xl}Sx zD()+;>)9q%;#pbeI&!M|OXa#P4Th2iQ;}#CLHOoLGiuZN7UxSUB>9q#&5Hh<#^HSV zK#6#;v}UlhvUh7`S7}XWp`<%c)|Vsc$x$dbiH9E(sleJ?iN07mm0PLZD%S3ARGU{5 zi}A$ao{_97*_Kviev`7OQClF>=8NHW4`Jz0A{xRjZISdpUO#|1L6Nj4yRI`E-h6{5 zuR&khqARFvDJicTTyjy78Y4H(Ro-Ss4)+R`rzH~}ncDKJdpWtHt*t9hBY!}Mez71q z(+MJGljZ|&_V=TaG`Iri)f0?ExeNw5QWTmY;0%KZ8gHZy1QD7>b1H)8Oeq2ZmJsu{ z!>GepMUymIxqxkWx1cOi*9e23(V9<;nvaf{4-eZ84cTEYDs3+e*$<9bUr^a#FOJ#| zkJ=88I*zFAM^(0$RF)Ge`w5lxl*(~N<2a{rp4U3yh?kF9YmovUcOXTLDs;4LAs~-_ zvsq`*497PZcZ3qoNWvLPzzIffd%}fcQoqKEWrVYP-qAXXm4t<38v*%By}d$b!)8Hf zpt?+hJN7cQQ@F$aS+1qOmTN54T3eltd~~ti0_Pj>M&=& z4{xk$%&lxFDR16-u73L&$(D1XqAGcTs6MY&o+oL7i(eq^M;g~l zU=T^ldsAKwZbH(}UJV3R7_j5$o3i_2=$1yJdf6LfE*Rw-XLbr zF!^72nUFVuIA|~dl`+VljoJJ|i>~fjNAt7|E^>u_`Lt&F*r@pkAU$k7IBY*KWIv#E z!VV5Q4vg3jsvNKvMxBS$j>BsAVU6>M+6jsuRXJW#*-ogOCr2HpN1dlNj?-G(Sv}xw zuQ3t4o2MKIo1^U;J>u~4p{xSE7jpn&ZL1dF3RFv{Ju!57sIC6NWiPz#mKl4)loPvD zW3W~kY}iium=kth&z1#RSDDVu%$vO9EYms5^lZhTg@bOd)!SfzH&WvWwl|Jj+a_(; z2j#L?Yx6<M&MG%Bxz^Jo=>H%%TMZX1>Z~nf2stYG!WgdA#b4lb9`E zl4WSLX|`5a-$`_-^v_izH(oQH+*Bz5a>E4)N6-)5HZ#3dB+8R0idwq1bq+nzIlQk= z|3aVsNblIu-m#au^~XEM&U6{i^%zd|sgCya9q%96*D+Ec>&t6Ul{Tmf&o!KrcTv4e zI8oElpL4Qedvjl|q9a?B;D%DL&1vlS)M*4&1kg6966UCMo3!!Pxy&vdKKbt%tu z^&jg~9#IY-9vV12JaS-A^TN>hGhG@KtZkR-ORH7eY6hN@_F+4Vk?6tJ)=gyiSr zKqElV?!4@YlAO3JG~)_RIsC9mX8-}`X?J+q9m1c`j%D`x?SAByBk;eyp%s**LD~$J z9O${4a7Q3R)-&g5ny^TX%N5$C6RNqFMwbv0KQL_BH)P#6WZS272sUKfKWu+~*uHN> zfcJ}P*CCA?2!{)PNM%1VVm~@;e`&-4|NGde{g}pfTx&g}vz6&B)ngX)k`XxJ_0GEy zvWD6kNv%N?$=D-!BwL1kcD?>wh- z!Om+v+*0snL??1n-d#^MYn1ucn2{F;yiqJ7GnreaY~AyKw_AsHHE#s)M%f4SS^2;K zs{2byq^`?cykRFrX2VzO2waxj0*%;YC+6L9d1+pA&eJ3S7)fC=Y?HUm#!fOrJ0+jQ zNPZ5Rs*Kn!_sEIgoU*JOjB?v4c^_UKc7Y?yI(k^(npKO_*PK=q*b!mh(sh$qJxfZx zgk1`flk`}DJEb*QtC*1J5t8Dm*AyCvlK&Rqz19H6;eLUmziywCAIw!CH($B%XgLp< zbDtZPIp~oOg#516NXP8NmYS;UT2V<$XGvq{3DbOcAkpoK_Itx4{9uwTLjAEslRJi>_YR3_TkS|` zW%n*o2ez{qjU8xf&8n;_X==`wH*J@-R+|=;{=`TiG2)48y-~d{G!_VLam1 zZ)hZ%IH{W4A?YfpZp|rgM4VvI;dq!ZB%M2I6AQAZ*+jK=aj@%?mTweGo8!i~%S< zpIab0dlm*+6cBw~7p&H{eaTDksSO;)o5=brt)cq|SO4$!T(YEe+$= z_DLHO-sl&H&Kd&s0QQV0HtUL-pp;+>_L+U{^R9*|JDSC7O;$YXoYsQSJ@9`K(gx{yBf2CkF=kQPlfcqu4Tus?38)6kH505{$4!l;zP$j z2p0Ml0?5MUD?P5wy;y<VQaP?L?m0NA19P@Uz+|x=;v#6zHg+9Ld|15xGsQ| zuL`JHoIRQ?I9L}N>u{sy5x&@`hLhBf@ZM8#bLP*}%kkkK!u;E0?;PJF;e@~7G`jla zrlL?j^C}Pv_^3Y@cQB~2s6^1*G@?PZPqP$EM(QPig?EUzzW9LQ-H$X4|1 z=^Z{aF#Jq?(-UIJ^No!Mnwt){w!YNSv8ztJvqrSNN_?bOwO!O+Qro(-Y2?|q(I=!` zjdK^g?eO^g{Hw!L(ef zTP#yAoE%v=G&FZ`XmQ{0(sRRRko&nI+jBz>X5?@m7;(ViCR5q>k5bH>lba05?-3gU 
z?1zS|;5Y0AmF2}z^Gj;;agF(mdJzsiyxs^;c z?yNSrIlK`$2f0zU2KOCp=zzCu!h+O5&w>l_2EZFF8I~>-yp=k zL7RO8&f2%jCMnSjj`8d@<$wjv10jOUK1^Zy4pof9ndWVk_=p+ZXaEk=)I*B~^S05< z&2B+%QZd5gNv0D0IIJZH2~{S1ge<WM}Fh<0GbEwDMz>z?AjolXWPrN$xf)J!fvW zuu^2hC0IN_NMWa<<#-gy4hV|HT`6Y8M`GU1+(bh6=X_>LW$v{00YT4=+tc~-wN|R+v<4AFn4%OrN z=UdOWjymIsgPI%$E33_Zx!qqiZLXM@m&{x0mu$^uXPd>uzondMVBR09@Q^Pm;7~;*7|8{ z>x`vkb{V@Nom-U7&nT7_6c(pw$x}Axc)V4$6}@2<1I4vaiRp(-?NoI2s8U@ZZ$}Iu zzo7?t>vF?#n>|uL?~$068>X!7^NtQwnYbI$@=$t^f8?kQC7m4ANfl2Tw2cUzt>_w5bX4wgTLknfv=|jWS zj4z55@0c@)DIEh#en^=Ds8yP!)1wP7!P_33-KSi9Ub(z?$g)=;x8P?3+dRn)1GZrY z)J_;DH~oItPNuYx4O!r|?^BsyP@xtbLHLEUqYKrVMTu?+`DPT0L1r9fai|xAkzU)p zt7*n9pK{fi>{SK}0_^bA(RjuS*H3C4Aop2=r_A7mF;05{Y*>}iUuE>dDh(c3g~0`@ zG`eey9uDsc9m?GuZpM=9;Qv}+9Ig~&PnRAL7tle=6D!-zgROjsYQnq z5I_Z02)f8~(`by;WC}jB3hp8s2!Wy@rj|yr^*9WI3REeE@^9K2bXTB%4Zji=BuAgk zu@!jBMv7y%S=k!*wS@|E?A!W!BZgZ(CYs6q^SC`lOxYY=ZWqifSSx6wU=l|;mvUl) zGQ!Mi8qUX}5p+oS0Zb*u-zajErYq)Pg~J%30&=&^E*4ZsvqbG^2}7UNv2kCMFVXBy zw7OS2z42~OyvrHwwa0s%D=p4wt0&&+Nz^Sw9`BeaXfkDsRYf%er&_hv6>7n%9G%S< z^=H>>9+hjd8now^BXUnd?pkehueSOqpxy3Sfnhsso-^W#ozr`4Hgvc%hL z@orb5*Mn`wI$hBY?`pd@(e6*Yq_ga3))vc9e-6!-?g4AWhU7M4IgH#)#0M)!T(OcR ziL8);GyIjmFPTSN`4PCFYG(bHGDYqbwrPGBDiNSd8YFDPQHHB+u!lw#Awi7**1%8v9}i@-b6>&sM&3>dC_f!QJBbW8=Mj7=0ckckEx$<)gY0q zUPPld;^h03^Un<~?NOTd5V_%S3)n`4JcaEG$j$kEP~}AZ8vJrZ)b}e9SGPVlVtann zdT`Y8;>hyRk)@NPaO;-6(oSNK2^7&~6Rx@mSGCbzrne!njR-kh z?UPy?de2hiK0ikA2DU4V0<)`4zUpx=ZmUe5bgMOa0dMAZJ2|{jJI6duD?~r9_)HL|1KI&LG!Ap4jiw`G+Ag;1|J;7Li4miKGX_- zXc}}^1!F^9=@j-#tKyD#=zg7{)q7Qs-CO1JG2UFEzn47tZX(Da& zmeSgm97+FX`RFF-lQb9$rD|}rNYaN!sS-&~v8Wf~C#53PZ};a*`nJi3w#YRb>(tqb z@yF`4*_C}qo7E5*omoko8=c);)4N%r+bB0|ZWw<|G>qc&Jh>`gHe4uE=81dJSBA~z z*L9;w3@@>zQIl0Wu(@t1S2_yKmaWnO`J8tuk!V_WXIFRU){o`XPh`uCkXR{^jucCj zc-9t4FYS_iut3tcRo-7L>&15Rq(dl|+ay-zNylg<|APR1M6K`>I|uQ{+yTjPk3GaLZdw7%))d3UL)M8*<`^kYi#8 zBIFq0p|haTIe_sVoikAPbOE{$9F>hbr4UYmz8EvQ11axu4|Kd+XFPZ$=6Z1K35O_JQwVu#$v!BvAVaRR65DZ61ykg7^_qxVNY=+gE+~72BYm6># z{C>5;nIt#C8!7X1+GX^-Rq4%QgBfmj+l&J%8`POkc|w>+?uy1Sl?j&-9yR5nPux#x|6P4R`r)PEYf_<%e}kxQJRAUSxpX-SZauNMJsPuN zTJ|bUib~Czg3hKB(t&X@K1T|5KNweG!w@O_b9EI%!TdI;k~aUF!~!Ksi4;sq+U@PjsjIKEKP9L;dKD1 zR6Q@jdqc=A=$12OSiA@hZr7ZBYi(PBbSPJD$dQ_UQ#o27GZacS`7$CdK~V}Rw9!0? 
zI3w$j9@lhhA(_f!y$w&#GoNR(52|HNY+XseU52-ndDk&6a9& zISMT{zFpKmXpQ6C z!a6QRZgz)Zn_3M7aAcck^aLARQdK?qC&_<2<1QhL%6tJESg?1T`C*yvV_dm1NP?NcxRZhWLUN;F?3~tyVmI)+l1kO6 zmcDt?+C6JmS={4JKLX~s&||!wFgceprb591_q;Fdd<*se>&X3zIqVC_{fdvjs%PQb za$^@Cu@t!})=Y&7kR=MNMli;MB8c!z&^BX?1Q^DwL6cW@4Ttb&uNoNxwRdCvZ z+e89784K~mTr@F{4CzW@85GCN8_aTyMIAw(*=e`CT_7CPjQ~+>0gvIE!Y%H@p}N54 zR}*t9WYgglYz+j(bjLv06%0ba+KJI(-j#?o8eNRCO|hqAYY^G+9hmug|=f}p?3)Y1v^5Nq1iUdS4P(o8`vG6#B);QS!N06X!>!va33nx2i3WnsOBraGJAZ#{7m!vU*ct<3uhfF4pBpGzE&WJVdso zS}3Swi?n%l${n?xO|#b7M51-kSy0oR4`nowsZcyoRHH4ZQk5!<`EqSRqaKTZyasH1 zyhvfn!*eCZ%~As#ec%>%@GNv=KifEnPh~cgI5jX{P}P+W#Z#FUZwEYvR4TKvEHS)r zEp{VUs?Npl>ouDdFkM!?0fu)Ca--O7z!Jl<4lxtfQ-Cv^wZpn%g=i_J&UWXOG$ zFLJL$ZeEy%^fry%q~Tq0$O6tfBIA}I=2BzYX3vbLW6IZHa!HLY=#0XlEZ1{GXOtV7 zB#=PiR-r|T*DW@9Wky%iID_|$r)SpNH}CIV2zJi~Tc`byDMff0McU}#LS;B|)z7O4 z*v}5z8FbxyMqPmM^C}l}rtNUD4~)8CxC3m%0PN>9p65ord)3~(Di5Brd)TvU$PIgT z$o=$?>zQHj+kvS2^GeGBrTOp>!5eHwDjIOD8~2DNys+wVA~E{DsDNBcx@?G$gV`r^ zj+0neu-de9;JU#DhnrWQk*f{g+UQ6#8*=0@(Kr?~PFm0~pBu-aQitP1t=~&<33}9HW3Q)5iqx+qyS$DKJ%W={|ogi`1!CdLY~%fn@*SXGv@*@UksrP zxnJ>%cEPc|!2hOdr${IP3kQ*OMqn16i7gU0+P(dA%gv^l=JEOFsbv%ub zqkR^f9dizBvuk>(b9%mGda-p1dpqAevw+NR``mKZyt!j)u6uG;yJSVy44T1&ZH(Ve zBoIGCu_xY3*R&PyrFYSVw}Vp}hUo)cQ)E4pw%$p)gBNOFa(2wy`{u1h1^Exlx1s0>y8Fvwn(2{ zXDm|8fZX{q6I}3Y`4|k$MqoS(vGE2I;0*xhfy%P!T=`74Xsobi_(-c7Ym=#XqFgnZ zU)=?d7Ua&C7_(&)If^M*UgOkesd1x7mtQ}T1CZlZtV3iQ*am39Z~U`ZqQda5wmJJ8 zlXc6l>nV^UPL7hdsjXtrS5+uCW{C;Co3NySZ!I@rsX->2#0z03a~n*XC1bf_yt;B% z{QwpdbK%74{_&EUp5i(c;skL05rL<7Bty(#0*0Ld!r|O+mT9x3gzZg=F&Lu#cvyi% zy|uQl-$J$J2)uyYtiY1Z?0FzZ597lgp-jZR2|!MBD|7z9U8tL>-AoZO&-!VN-Z&?>3YLBZQ8(plsN z8!32?yIT-KpYk?Ocw`3TfT=?JI0RLOZ3kHC_+DUJX?vD%%gGHYyWvcO=+wilc5?%~ zIl1vykQ)z!?H=*&8ukF#uxFGm7$divHZ|-(ciDa=q+QGio1Yw|7BAMB4F_5QM{34fu8PrKX-IDy7@XWF_asMdzej&dXt?XQaia z#HFW2C8xw&&PcbMRTP~T=N_-jIaXG1qI_3veOb>y-^|Y-?-&W;|ul5M$NRz z=5@!B*oI(Vc){x%nl)GSs-LWEDmWo2fPU8*dH!ih-tn4(V-8%ob8ww-S%IW41? 
za}l?bmZ8zGJN}GPaYx+Qh7FcPA`caM&Q(`X@`(;8xt7~46VceHrndFu%(E(f|KVGT zw4Z(-H$B()ZJo=yNKD;_W&wbf2afFuxF7c53NYX=zjTqd_1B z4O!#M>L5KDeT*{Z6@%S4{Ii{1wV!NzQ%W68q&qs~{6NJ%#Mf7|{X4&hN8sgg^ZQlv z#ZJlLTgh{O*kS&vKXi4Q&<@CQBq;0o0737_pU1S7|5FDiuGkAVfiyTYdR=^fPVE!~ zc>A5~cc#$eQ7+Q1UI0F(bkc*&h*6LXT53em7eS77e1ut9=5~r0>Tc&pVWHh~y#UZWni7@d^-if?rED-XG5{1)>*Ve7vpAlGja_@m2(gvDe(FqfsbNZB z?0EH2Bx~(FiR2Rh5%63+S?keFW~jEqW=0*xd3f^;q{{TQfsHG8YD{w}KGyk+lK1bu^OET-Qn!&vhh$~>{(JetHniYnx1NlqO#)6~1f4gF2p^;IVc7jE;f32xFUKc@k#_Z=It%^ zjX$mYQU4zg)eC86QhrkYw|3hq%JR&7foI9UVL97A4T%xZ9BUpx(B}6$TN#0}5=`bm zG<<|VKM2De>ltc)1fA~(CH(j*9|5J*@m5vVe{3Jz`>1RY*CjU{x%b|%@aMoC5+DP3 z3zoHsfypS~Sr-mo~mFc)sJiX|#`P+1?kjrqbud)UV@d;t9ln`)a0K z2^@WjvEZ_xEsbAnB#1WQ!w70ynlydTf!ZkiNM?(Klb|bMJ5JHY{vwjqrt2LN725m* zG2l`ZE>zTr4vp`}UN{oRA~fPfUxv?*T{Apn^eZgWEY@wxk?cq9xPBa*;|Lgx%wu@k z)L$q&gu{Q+<|u@B2TOQ2#7rj8hFulwOHlB!#xRZsX`2fvRkTJeVOcS~BS>Y=Crsjc zf{i}7ms+Zz9&@E|wkYb`1v$KkUNsq&3J+*$Qw)kc3RL@jfuO(C{;U38d90BB&MS0a zwHvj})bE9}uh}iqe@P^C6^XB<=ZQkj+zf_gK6hGbqvS{deoNNaG0!48=KnhjtTl}|25roC<3sD72ci?Ge9() zle;Atd#;#f7+oYJwzplco`+G&jx9_CGpxRsAGS$0YJ{6cjhZBpn#hYIF+CJS9Su{y zBg?t}%^VF4X-4CmYXqEJC;goWWY4uJQJFo+$0C!3LV!=D)2h`#&)%#hRY~+ai@g(x z)6;^bUK24Ib+Tg624B^Yi)qTLRCkn2vM}M%LvJq`-)@^nl8^s|+H#^Qbc-P-bZe~R z3vE?93(UabUd!sQppaY1b_>ij?4RwFDqLYq$o{`7(NK{cqKF}ES@Fp((kE~s-?s+% zG-h|$67BdW{-9k!!#E-&*`TgTq#F+!q$HaiyPS+Rmo(|YfK%g0@K`!->nGQX5fLXy zX6yeWSYkIkkZ6`|zR7XQe8aS|#j@`l{6sOn8JI;s!NJ8fu?X!``rvAqWi9qN<8tGQ z6cMC*mz=bTAM9Q-#-=!S^440r`g17?H4rnF2aaWVloNLBLRc9d@Q51V8Q1Ycay4O{ zNYzu0-~Z2)Q<$`c7Td*fmJT`7%FdaiKBuhOl@LhQbG{@yl(>TExn7EG;s zTQVVRahhy5y=dn1dv&EGqyruc85kNun2SrSs57XQllo=5ylu&D_XqN_To6AVA7LUD zq6}~I7Zw$`;fOC=K?pL-gbWD-eA&OnT#z9k?<^`UvCrBw<{Vl(gS&VobJ$YO#KC`g zlowDCH+$MbDW}8PVVdOeZ zwiQqIut8}dhq2=apNt@Uj5*_j8CN%!oJ8`BOe0h)QLNFr zp%1UQbtIO}0`V{DaS^9xm`D|m`bZI;`%UmFibhobDGp?$AGko=2Mo$8LXqVOva@(< zI88+zt*SnPDOjNsBa4i;1n|0k&do*-!;hKT=Bb=k8DtPCwmYBVnN2=dSe~7Zu;c$o z%qFkpV}h6S0rv=Q7ITnv_B*AjNgwOV(b%{a{wV2~`Z8tZ>4JCEyk;%I#FQ&Y6I%rr zwn@7W$ge`YZN&|_4ty{kZS(Vbm1B@8&JBj{(SVBU<)Pww(dFl3>tFy`w}IQuWO?;S z*QOwg2X^SWyYZyC`e;@v%0d%*#vO zLCl6D+?)stOxuwVhIV3@_n#NZs?FciBZi+8%!i$)(k9hlIa2e|Ekh< zlM=(6QC6T?cANUT6-fx`qCFCHFR@0u(D;wLHUy`DNTJ49us6|BzW5H=%K_d#EIwS3 zN}nGi(Ga8qKk{f&j6KChdXq4?glA+`^r94}e6JO_xotcIc%Duu>clWk}@u6-gtIh=-X0A17a>M~uU!-B& zfp5*?;g`;G*{l*$r0`aFD*MRjv2MB4HlC#F){m7Lbh9>?7>dq2Y8cdkv#Vp{dus4c zhOk{ljCB?4LSy8X{6Mu&K7w4Snu77TPhiZNTKqPp>?{(OSpqlm$~Z5?M+V6lME|{1p#yClSB-@ zC)NImxO9HfUNvK0*@n$&jY693ZLkE-fAHQ=7|z)eNWGh8W$=p{N0X)*`zu$5Wu*sK z{GRP&I2=GFI1+ticD&dszJ|Z_l{MAf>TDJKIo%Lx&ILY6OZVfTVGY-$oIiwuxN$LAm5_^D*@rA6g)B)?2Nsrp`&NkuTsMe(P< zIAQ#1S2#7tw)V_i$D;|P=R67y^OUNM#cuhZre3WB8q(@sJO0<13cXKwT?oTNW2-Lg zEfM!?mrsgXZnci5YJs5kj@U$FFWnSRFgNe84`0`f(&m)Y(mC(>YqOdo4b6&U%-vEU>|vW&QccRj!E%1 zYrhUrLc_59ka)SR5%R5o`LOTkab1M(O zCvW2)uK$`#RPLEOF829u^~SW3q4%-RX^BpHMw-)ilGf<-yMzEa26(=f+YEH7GIt=| z-##6hLcSm~>n>c?55FQvV|5en;0MnW3y1+5iu@G$tFG1ajSv2P<=+rFG0zS{;y*`p zROuGzNm#A#CXaX?k4$hf%7QipUu=R5>Nu+vc=y;J5px_z@;UJAR{D}deAsmS+nrQ- zTYkMiIspd9Q~pb=PElVl2V!Rc*d(d8b4P)s~STA%_^O5ewfv2mr3Dn-<@|DmG zcF45-3TcDjBFl2MU*)&c3)~&W{R0t9lufApf_L@n2eE>9jg&bSNkO^^!m(plSOSc^ zpV|D2h|amE_y*Zk{i-d0J`S{N)9C$gzXxzF!4MTt5 zvG50(bsB{6zAxwc7O+7yx5ibqh;gy;?VQy))(6oj{(Lf0Jv_UBR{9(9*6zokKWBnJ z&ZPGi?eR|=fQH8-2|-i)Wwmg*f5^rGX8G1AI%5vhqcb5eFmMZUl42UiH0H$DWv1_v zRZW(ja&tv;E_cKPq5BbD>6!=dWP zO9hUZ_-~c&8}E{IW!^;kab%sF!q2Gh*ZE}tp2%+$r+<3Q0p29v?$lO$)XNF1_m>I^ zn>eM6gmvDTu)B)l@CyQ~2I}3dmS$k?(^qfnIN{pfa!+2Yv@Wl=Y25b#LUR2shW_3U0bd>aVFY%UO5a(r_L7;snxjXy|Pd)yvO-#LPIurHR!FVEXKYHEa0tGGjnjYJZSyTO1 
z1+@1U1Aoi*G{#;*TVEhITVwy8VEoI^)Isnc^vipfN9$HlV zu|2mC{8Zt5>Lva70pn~-UwcwyOKI$RddQ?s7uwvNUwsjTk}mJz wA{r1{=!`3tv)=qwm(P5HNM*W@#~tbIy-zQ`gyoSX4hH&>lTwnb6*md_KmJp2!2kdN literal 0 HcmV?d00001 diff --git a/images/Geo-Rep04-Cascading.png b/images/Geo-Rep04-Cascading.png new file mode 100644 index 0000000000000000000000000000000000000000..54bf9f05cff7d0cf2bf390b2fa44190aeaa9baa4 GIT binary patch literal 187341 zcmV*1KzP52P)Px#1ZP1_K>z@;j|==^1poj5AY({UO#lFTCIA3{ga82g0001h=l}q9FaQARU;qF* zm;eA5aGbhPJOBXyj!8s8RCr$Oy#;uiS(YYTU5&%kJ=8tjvpq9AJ3F&8|HAC<>`)h` zu&Fp4vn?`(nVC5;irLI0S(a>>nIXnF4%jhUmaYFi_ezQa)m^Esq*68ZbEC-erw{Kr z=RNO%`xTeV_1qKwSzKIv{`u!|5bv9Xg#{e6#gFf1*chLD^{ZdS zHcqD#+u;4fn|-=-$Ih`sesq3N{LIbGee;{&#JPD7%Fl4=XXy?+vv&X!7}D|euYcVu zTwGp!g^N#Vhr{uWZ+wGa2p2lMc~}zpe(;JP?8H}Qxm@sXecXF|7klR4;<|-PxA(*8 z>FHnp^JgnV+BM z+k5}&-3dPDfBken*r)HVa8Tc0`9AqUA2`)dcetf*E!}}9dIvCj0fjMR`81xHnemF! z1G3{g)$ym<*;#&Kc*DQTU%j7mDZ{t8AmA(5`6Gn_D8ai2{w@B>PXK%1Z}>J`%zXv_ ziQb>3oiE*ir91F+?f`izPxQyDeY`f?tK)nX)58|si!JacuF)P=WS`F7^KU`lL2vr1 zIeZAc%$4cG=krAG($dbC?!c$J1DLU1V~;&dPEL-Ejd4i9Z8n!3yxQ${v)P=VpWo5Z z;gvzJ;m3rBMEHL0J)eg^gp6Qge8mg*ebEE=2fSbR6?|-l6ULuDo#cMHTeS48r91HS z?|`ot{q&Fic^&bI;|Z@`;|j?4I-yg1RT$T3*ud)#JyP%RZG7FS$GUf34aoPv^%_4D z?|bh#KT^Fu*5Q^mUb+KM;SSuseLE&5CL$u@{rBI0?X}ldu3RY=i_e@n0}R1A31Eo7 z{L8;Y3<4pK)2B~!7}4L~pPZbWo}OM_UhXyUnDZRFd-((YtCux=K#F~tLg1Ci$jJ5U z*KgRcVbi8fc)w-KmK{5GT)K1#hv4_Y`*jXSp2E#pI>gc)c*=KxgNgrukm70J33y9h z!^~M(Z#aXahewhD+_l06T)}ZopEIy#C^ICiS)ReD|qRGCMr2}Q!qhpoGtME+9N&;uRLJ zAn?!$|2iJ#}OavN|!cXx&uph z;2FOIPaIEpHI7#eA-+2}I9O0n0HW1qvt7S_-TNoJfwEh>cJ1r0zYfmUYPI_E1YV`) z#U-DPFZD9CeCNaLXJljqXX5*`yg}$^{1Gml-hbpfaOlt>m}&$ve(-}IgwXR+ABfcR0%GLr%s*v*0;We9scQ`{wel<_wHSY zGw=;Bp#pJEH9SZ;H_kaKQ!G*$Af>Fmd&}iTJ&Uc`Ie*3q78w4r;I^wTC|M}0M{^H`|d;`6o z;yH0p;zckzoel^3J@)g5fB1)7V?OMbeToAueP!tmJcT>(-h1zH0^pDT_>Te5c^tu; z3Wo59{242D~`ZY^83jK@SkCtwc{NxTRCz}F#SEiEnYzWXi@Sm3W5 z5P%3#tJT;5Lga-VzS!c}v15F1$khOG!Y_aS{{7ew7>hsqvp?fpqgTE@oj>pt?&{JZ zmhQl(z5_fF5GDpc4`&U2>fL@JzWBuPgjaXK`wt2V@-};b{(@@bS`4}d3aFx@0=KND zrsm)O{onuVfBmoj>wo>P%*;%#-2wsvIAt0f9PC8{kBnS+lP&n&=TDTs_eO&GKDpZU z_xI--lkb1uzI|L>@*K1;#3Nif$^XcAVDsk9n714mK%fv}Kq92kXoR>Sc;}t!01*HC zfB$bTeTj*Qh&6ol(MR6=2Bc7Y?|a_^#0TcUSCFR(w87=#pa1!vvBQ7-$A82j@ay-# z|NX|sM&CG~@B8D&kMn(EZ?UnlNY4cA0T=<00Vm?w zfKTq+xdZx5DwQJT`P{j42q_{z3A7)#GJx6}H*N$)2Vp^OG6(ng5SSieN04^DRR~@y z#$8-*$sxZdUgPxuF7bYjJ%g>kcI_Hu4eTH&NjTx9rKMmfKNtJ;#PI}f3J;WCym*n@ zY%l?Setz(!v|24_2}C!5D*o!P{tA37zXd$EoCh`djr-t(54h4?wQ3a}2_h!gCx;F^ z^#M<7U|;}A>_bCCcwl@#z5&`trd%{MGyunVJM-YpfBSF$jUU|$N1o-mzI1L&cL0j) z&;R_-InBW33!nir8Q3B;G!*d#UYY^@@=yQtPxvcj4V56cZx9(U2mD)azWFAUomYi{ zX5e4~a)-@8D?;M14gc`$*|Wa4{Q*3Ij&NL5`5W(v!6m>saE1G;4)r}2A<##1H>%XFE70C!i^g@ zyd@jBDPShxW+B#qrv;QJkx1Tt`)%B&_V#uVw%fLCLu{DeDIh*%oW1kTJE(c%d%#x^ zHeR!4&Hw$s{})K*AO7JV-g@gTL@~U#68nFk62#lCM<$c;Qv#Mm%Sjytys76J&#W#rEkd zyH9>sm$qEG1N;s=;N^n=fLwsd!+Zwih8g}BfAJSEO!yGY90ZCRJ5X`J;&7Y61;W3@ zhjn#zT>AdYfB7$*iTABD@a-(;z4PAAG5iw_KD_0S_&RE^y}8&p9|#)kqnC7i_0?CO z{zH5kc!I|c0N*$!fZ~gej?T%+fosJ9ftQwmZbIPpPyXajc!EV?VIg20cfauCC3W$B z#fla9E*`_~-MjhcxSPmt=MNjN_#C@^pe)S`V(?u&X1H8DHVky~$}6w%qaaJ;;c4{r zAI#Dbm+rtPzXO=ea7Q3kKmF-Xk)On46FiCs^N;-?`~h>0sSe3P5)z;Teo&6V=R6=d zC|oX#z#j-TdLw{59|B~9_Y-W4=!y5-9>xDp*?uNU<7yW+{uwTUXd2fr3~+Z(Lr5s z^9KZwnwko?8EiUtbh!la3M^zym39-T{p?rboQzs=Q;bdS9 z-+1GVr!Qzv15fbC1(cUpCE+f7``h1!a)E9_$^t-xmjZ2SYQi()XbX-PP#6-+xod?F z0ZOnRL`gWa1cCuikyp2;rKQ!^*W*vT5Fe-osTjV>juXG$PY~k(2x{==5J;Z**MI$2uBV`wuq!xQ0NlugfZoKfK=}`luHN(X=1;&0gSy1$oE=2u z1v|v~z*T@$^3NX#6@QY8UD{^p4m{;M;LUY_Xq-QPp34PXRv2%7CSIG3Vr<^D4HF)M 
z0Nj8AY1kut@x+M}yqg@Cg1`H_zXN1PQ57%*{t8(@^#8{{{xPS?VbKu%N1z1Yz?)d@ zTLQ_C3z6i`0Dd4}Uc1KQ^~j(Cv-tGw*AvGRUM2?~5)c!I1>RUM$5!AKptHD^L23z@ zQf!7hiyRZKxc~<73ATiS^CAzJd>+%q=TLL}(c%fA_@3Wos6VbZeZd_*;im*;$~Qyw z1fL-B0uTqkVy?h}d&;wR>2OPT;M3g!%wGIJk@08+pW6tyLN{^U7akst4cjj5H0FQ)&;Oa%cW`yd4-K%6 z_zOP}KKE*dPj}%<-&(o@Pv;JJMF2i0FCf4V@B!lTaA?8jK`yxE=I9weAt52$(xFW) zpgiOVLgBl*B#%mfx4^dmO}x8>kBx#yeh5VQz4=dE`nardkbx?0o+gD|fiL6$ol6$K zEuRZsf8uz;_X^SD0l8rR{lEWr+$>*06}}V*QeHg|x8{%j=#QYzaHoLU_^sjf9rz0H zgm(^sYXS!3`}yv7zY7(HXAB1j%mgQCkz~sgHoaf*l2Le&yj>u$2!I8DQ~)A4C>|1E z3P&MJiDOSkVwaBcsqX-M9ZX}03^)c9E1_KkT%v#bw}0d7d_p&&;RL2PC^v5u0|{kN zdC*Fr95`-($XvK^0pfaT;1^-M@HxC_6k|iOurX+RfNy*qQ4DTC zA85IR&EQ4w9pdOb<U*z88c)cn0DO_{*Gw{sVZLxI5n4b4{<+ zMMXvZ&hP#1bHD>%A*`@_=PrB&c!Kj2U<-I_KYSP268zqAJV8#6%VoD$AhqJFUwIxo zz%%~Vum2iw$#4DEZ~ejV{r>O$&hLKf8{gcqZ94_1*ohFvWVj9SEc|M=6!RTI&4HDI z{{$BeN*N#EL+loh`H4S|kG*$KQ&N_W^~~J?J}*Au?D*$@{#TX`M)Q^@6ypsOspo$C zH-GE$#Y=ogfbtwqU;|F(;RkhPKls6aLZUjpiobmQ>)-slzx#XsC!}I{OS=IMzWMcU zP+BD&<@xQ~cit!HK8x}kKYsG-U;P>m?QzAP$BuC-U;R3D%!G&`EtKmxe97ZS;a4y# zc=cIA%;yd~*4=pKp5@XBJ==HS(NgAPyu2ZD)DhxRQc}|X(~GLKm32K%`FSW9XP{6LMf8?+*iP1Oixniyt=XqL;}8uSVlt~85jO@< zt5=r2iV8>Aou-E7!v_y>5r&I<^Y$(GNs{5BALrsi|KPyC{2K)&@q@_7+`I#S!{>N| zS0-QgBKx1g6Y#kqd~e4+qntGy+@m*XT730uc(kMh_a8ifCKTA0@6~;h_w4>YsvY>pefcwZ+Dp55 zcJIK$kNv)Lec;b*qVnnbfMne#Wj;2xwz^^O?tTB~AOGKX|Kxjr@<)IAoj>@)Kl;ue z|Hb!z7_i!}qp8gWJ%h=(=(>6H_G_=d3Ca5YpZ_2!CE4Me=R4FJQvUD%`Iql}`%f^w z{Re;i_FF&m!h`($+&A8M?fZZBT>$jo`HkPi&VTr4fARjiAC(lBx&?gE<#f#BJqgO9 zYj(;hOH}^Vpa10#|Li~g`S<@5kji(z_vhdL;a~jk|MyQnefJ%$UNbd=x^b$`=L(5V zfGz}%6gqH2RB=jfar3zYkaT>=Uq30vdUj{S(wRLQci@rs%YACzzcImq6(C_`@A(+) z?K`-4*FXQC|Ml%Z_`Toz-S2$!8%Wpt`d6O&+P9v6{-bx^f!sPB3*u1GSHJSLuRr%S zL8uV+4gykPEC1T_Ux&5&%OCz#Q*9&P6}F5J2*?9~)%gZ_1qAC+De~O&-+b)-wDp|z4qQ9oG%y}G!e@Xdyw`9~;Q2exNzBhJLRI|ExBvLJfB!oN_8;kNZtrXByfrjD zG%y&OC6)4CZ0}(F&+{M~K-Ka)!Mjf32gzl_H*U_%IxujAci(;g zYj_aPJ^ye2{z6?#)8s6%5i@tEdRsg8ZQWIBwocE?L0#dpNkakIEYQrDnRkBsJAVZD zhvbdF`pX~w^v6GW>7|#JEqi_0Yj6DVpZwWZzebU*H{X1H-ZAZ%ho*DGLLVLOzKIWi z;(2Z#v%JV7AJyYL>!oeKaChKS;E6|kcTV#?-_thYoxj z{LIY|WT&IEy+YF6t^)fjUgj1Sq5FUaAhLAu1x7{SOh6X`+~30!Tui-6+xv#VglBvj z_4tLGFiR)=6z%|*CvLfX?bm%`MUxy%IL&EtYZqM;aEQqg>Y97saLP7r*-I8Si88G3CB^ z0!0>|37+8mBoKmoKw0<>?quBa(HC(0ol|p;Z=pW^`LBNco8Q1a{i`4R;I$WD{D;5) zJG#O6;H%FqU$bInVjPO9tF841-~S#S27(r5Qz5=QGd+1=|6XDffCInw{5wDU`RbLc zwr<@D;?!JZ`oVX30`tEn}kP_nJaer~y7&K{+gGQ=w z0XZDM{;gjF8};Yk`)+Ne9rJ@fU3g9gLlQ5u`-9*A-Tdqf*CGdDzVOUTys&GeOZ9zy zSkK>6kq^qr1JLd%h|$s^p86elq=Y_VCV02IH`#o)dL$BcqjvX70Zil31ZQ8PIXOEs z<({7mMsOj;Tj%@?qZ9zIIZJ@w2x{~91QCcbU2x8WJ)OqJP^k+}cM=lvF0gm-gGfwb z8h~E0F*bFB3`iLqBWN}^q`=2WL?ZeDDsgTW6a_(vdEk70{%$|VErSmN3SZH4Z~xRE z;nD%Wba%j4o*14W!rRAkc@;k9@vOrIMjhn0>uOBG;loF}y3ytU^Ldudf12rQ)&{}> z{@Pcc`{57%6b1{#Ky+jj4?9LhMF7qNMpsu=;FE8C^XotO!JqN|6Rc5!15^hk06zcR zJ1ajJothwGeVT9sljDUM=j@ydPAv&97iu2s$Ir!!=ebQpL~;4@52?W;$LB0RNu`t@ z-oI~l@-CO64_k1rf?bRg z>Sndyx^Mq6u>yb)-~8(H6uqDb!*hS|y+4v{in@Mlcy4wYZccN3-Cz9R`}phc{^oC_ z7_=_u9E|$FwXRoQ_&1PB`25$uf&ZU-|Gjs(na|8f`_Yg7?JM6TQ1aY2pMQ=jKBaK^lfY0>rffA`-A#ytO(AN|We(~X8Igt}z+>&EqA1c32Z3?N75 zc5z7IJ01luJX7EoXnx;Yzl0^ebUNzM7xdV8;t{~YqY()B-apu^tFVg_eS#(Z6U)mLDhzVS5_WL&;_X<}-;zOfDyz1(j7 z=C{839NgcpJ{PrZb46VZl9c-kw}Z6#s@&?GgB}B z=-*+dfGWQ6b?O|AbWWsH>-BnJm;k`#=Rba%$v6|?N4!27XYf1lOPCw>d>#q!e$yV=_YS)-IXzKZUz3;^e_-GKP0{P4B;kAZ>^gttLV0Z^HXfT~8LErK z(!^Z7eD3V&*qAFrgZ%{KpqS?8db&F=oIks3=k|>oqA_xFvbffP61;J9M6OluJANcI zQW7W#6^BbuC|sCdFg`hfJu`j+N|ZDpIb!q9_^UDJPMwaue5D{K7ax#w=J^4@(`xnS z&t5ov@>E7jDvO(a-1z18;}>8HZ*=^Liq6u`KleNE7*6#gHsy2gV-PK2Qi2N%C~zF_ 
z-gMSU%qRIejyb$eIi|_cSwwyj3rp}|gA*(RkA2)5kPgaR=F|dP)@aT#1?eV^fdz3r zgCUp&?xJyQ;mH>BIzw)zgg*iIAs9w+`xrc%=^lLYzVn{WBjx9Ff1*n#@Ws9Zz7yVk zB@awAA8QIZam+4EbGRH5Dj-)C{^~cL`|7v8@~1!e?jQc?cai6S@r_^qjeq!G|6f;M zCx_bMkrJL1Er}45ye&9-`>*}gU;iZq*}w7CKlsk?|Led1%isO|-;Uh60qP<$I)ZZc ze&bt!6yN*d4_|&|8S(;=4uy^X@4x-a&B;;NFF3{?=N#CaykEQ`$MbLg`oG{fC_X^w z;~QW5=C{B7hpZ3yH$bw#^umjvMn2YlagX+)SMGa!Ow7S+CVc{v_fLk$#S>gLcop1z z%82Wf2cQklD}x@vjYCkD;8(L3P4z57J%6! zgWmV-96a`(fB*|d@K2e6=d#H^1L(l3&nC1Laxuh1t9e6*Brj ztu>cs$QU;8^ui*t7a!giz5WFPo@+^Z~e|Uzs@QNSVkvaUwHB5y!=A1l>cIz<@?wB8JdsDUgG=Zdgy+^{1kY?V}yPE z313Cv^LJ7`P*d!vZfD4Q9#AHpQ{h^S!44=gGVT1;`%sb9#XsQ#?^%V1`$%+)+2>H8 zTo-$S5AN-~8I=q0raUMCA955j4(E-K2PJsY&Y_f9keKUqej1+gqy3egWS(zx)b4<%} z$iebH@G(zPW%`t86CUnmxn?fZhWygG_lViZm9OuQ`;+^$r1y(oJb_#~<0p9sxQV@g zKq;`ShXt(e&S>T&^3(S0Dl&m7djS2YIi8AD0;F87T z6;vq-9SP}T`3)Q|FbQ?LQ|!B}$-VpA$ZF$k1`fftWvQC?Dr0%N)qUKjS4_M&5UsPv#ft^V!R$OrQxxf zru+Q>5;If97$fm;Sn8=l#zNwh|ir& z?e6q2H$Us0^0YIaW5IJLc{dA^N&F5;2Q%Rvr0Iyf^+I!;uoLNE9I;cxgbT9UH=z1f zmVUwE1|4EPM@B;Of_oO(u5%;4m)O$pN;1C&}dc@2O1zeK z{d0fo9@_S1P2vk46ppjyvI9dlJHI$H=K{^{;W7~`VPeKDkN3S>_&lHUJ43DlKPz5k z!IP}-V@ch6ai5sag+4x>;D^M4<`KVP;DGK1rAIJiEr+L)s(hhQA*-El7ugKM8ztVxInE@g8u>U41q`K^dccoY_P!g5w< z9`Gyo%o99j`V!LGap!4%%sZzSaHAKHo`x_9KE*+yK*0>%buEm!7O}Q0O4^WsGvjd1 zu$#($+z5as4e4fFI`S{(X59H@lyXZsw?um|NI^%VsX+0Y``kEp^^SLkpSy4^oxtaO z2Ym4aGvs9Bog^=wdTgu-;G`ffRcE+gJsJ&2d|?`P-fgKN0>IXAY8V$tT_o5X<{HcE z$WRP|NfYyYn0PSmk8Mnv6v9O{rQ{qTD1wkM4UVLlICI~`t8?9ImwP`rHq%C2yG^?m zCSCIc(D4ZuWGbGcEpdu)uI9+)BAG@77#2*Qa4z6VaT)|-Xq^J)y3kEw{sK?S;7Gz3 zPx!w7oFC`X;h*h0z^gR5)uLoP%EqIaOjB_Y02dRL4S?N6SOLn1ppKIi&j5NLyED7! zKz2W&aFP)Rl@HEghU2@i!`K*v1GjH(D@MSC7gba0GGD`i7a7bz2vD=(#xYK2#h3wQ zD1+^Sg9+jmU`OyLoa8i0P9U&2A)Xh=ybhe(1X)tYorSsE;Pj~*g=>0lafVU@9`yrJ zf_$+h(P^$gKR%vdTqo{6OfobA#?0c( zM%7!kziW+YZ=jO*8j&bk`*lK2Oa(r%lVv%Z3Q6@^yo~5-D zfZ!)az-<=iJcZ_2R82ZNIX6BtcV~X?mUC{zF*7tjdt=^tXL|0=gkv0Er^f}Tz%n*y z{-F?WetK?xdU^)mnw)n6B+=c#=a`yEr=MZD^>kE+lYZf}Fs7dy`EnGzgeRVDt@9b4 z-6wd~98!=6PpZK&voJB`m_U^p#*7m(jld~L1C+7Pd0(ed<>{QAaL!D)<|a_-38W57 z4L-m*wcwa=&W+DIC+A#KGnnd-P?80AxRGl&raPU;-(;GRUg;39=!@jsEY442C#ao* zOi{g@YX(JY6LVAJv(pnZ(^GQ<{Q<{k(d!aYzc2;apIks^iFu5%xoK=U=a`(E8=arI zH8*twua22Jj`=A9JB!XqI!g#L1#uuEJaGfA9tuil=MdYRgs76hIv1y=9aEFe>8bg- zNywaI5jh|93#mUh$iJUC{0tx3(yqRwcK|cT0nCbNhKUV54gVGs`0o7pB&M5-OasO& z-oQ*S7at|mFjYk3IcFRT(=Z&6xG`7_*Bo}@a^ipdcE`1FcY1Mx$r=q<7N7&LbFgT@ z8k38}z~c?}#)}QP1i^}t1%WMt@meH5*0C@?>AVY52vNdu;ptNFk?;k`5S$oqprm<^7LKs^fB(xCwN`B?V7&jn!4qh8^LM^LXUVPfEajO#K8c8`QiyT+w>(B*rm&Q z;0}B;JVBm;lR(3m1KNb3`Haf}OgypZ7z!)L)V05=^AqYG|1BJp%CVoV^*j43++cZR=_);!~MPQpK% zU!1x%H+Bnn0=^HV(sLT)Bw3*HAnEhiKLm~T?{eYw&f+5Emzwa*jXN9w`uJcH`=)}OUWXQ0_)-)Q%z-%Ey8&`(E+zWYldVl%S_V*sbr@<4{`qMEv z<`~DdL7NQBPj$|Ww@gj6JLWp)7ux5XZPU*78CS=&t7FR5I^}AccD2vUx6Mqo&Q7$> z&$KvPO;fJsyUvjX*WCqpF?a8}#z$SVz4Nop<5NA;&hDv&u1ROt^g{QvtCR1C4gA_O zz1TD1yf)$JpLPz-EDp^&Za8Q06z?oJMy984IiNZhyXKtjbMtMp^EhbF>_Xo(4eYjK z#?dx2*D^Ijuha7_QwuoB&dJ5zDd(;E#gWO0yN;>5v$%QaKe9OBnCY0CYn__wnQ?Vb zx~@&Qu1&l8=mea-)APM^^Mj6sJ_p`ogSr0M`JS2i{#od5cd%q~&WWs2;cD(iO?XIb*KF4D3 ztgCO<)iXu&pG08B)idkro>}akadyruq8Tqn=iEZyjN=B>+rq-QbN-G4D#_V1J=QTh z*)})Z>Tq@~y1KBLgX9P@h5fWoxw@ukZ?s#?@W}<}yX(_)5F+4aXsEkrSi9)B>2h@~ zI9sQuTc>B+rsldGkgtWNyHlOB&Q6HftfOt#(K5TxJh#v|=W3aAbh|o|=;F+6*L>gnRNM48 z%u~m_1NN(97RGCVi%I*mt7XR3%EYZ}($O{T=yW)n<`)}a<7VfF7bti)Jv|8!532#0 zZJrwIbT~R-eWo3)Gmx6amRVQp9F3-FS1a~8=jxhubxe|B>UX%VO~H^ML;-Jkam?v} zqk9Kp>zrwvyxTH8*8!=YarKgHlHKW?ook<+Zk?QJo0@H%BKy?_`!(yjHivB-1LNTS z5wHYhI5X-%U(EUb>B)|<30P8y5@e0W^df9423fJ$Mba%y=rG8Z&CPdD&*BaA01yvl 
zQ6b1UGruqi)|2Izc=(BXF4C_a@8J1a;8=zyMsF@UuDcej{r&P%`=OMKBN_QeGR+6m zOb3&T52aX-rq~V}%MKbV4kg=+uxxv83W-DVCGzwiD^4$5O4>)u9v%-kiv=olLbJOE#S_7Mw~hIGbE}COKDW zDQz2{hSnQdBre20ICeJ6axkUfc$VcvM#;%!^9e)Iaed)&W5JQs!oz7r2T}_Tq?`Ar zSuhT!S`Qn|Cyl0`u};Q%2U#wHVOmB+G7htkcsiX%qbQDe!mloFiS;iUYd zNqNUo^UmcIA5YIamXdoq%XB8obU>eVCbKxJsps0=S>U@#Crvd9g`t;;+qb$8Pk5{T zK7SEgI)f*A2Yw-*aCch)-{t`GI5Xy$yY6yTPTWyel^;vZIFXijI>Ullcf?>mY_PIn zpqm15qTX5O1xvM;6Nc$)2?Iwv8&)YjDxc?1iH zlYvWTtf)JmWj>r-bTr*|IMsF}*?J<)b}Y$qBB}U9lIcWh;mM4`6IlgEGxKr6$Ft2i z)|pgua(P?VD4Zu^URg6P>IC4$5D&k3qEB*Z=U@Cg!1#5Sn=XYaftyQS?y ze$kP%oFf@95axp^W{Ad-Oh?o5j;0kIO*J1$FFu-KJ(6KNoL;&gRxAzEzT{*^8SL6&L&5Qs zqBCj5$MiV{dsFT3IE)ue*wCV@Wojz5we4iC=}>Cn@pS8%RQpMT^|;;y`*k=a=RivK zk<@&g#C{Gh6rb$%Uk*b`4 zt4X57%rga5g)QATW@wcHa7UC~Lvb12nV1G6cisJu-BGA7zKkwi!7sW4zIft(38V%! z?gD3oYG)^>r*1hM1%rdJwGCSh>7saTG^Un3FXVDo=#^YZ6NV_BO~r&FQ9=P^BrG9U zs4#^|3xroQw##z0w$A2J*L6@4GcHU27>s>*oIx0$8XBJ^kQIi;o7d~A1X9{QI4&PU zBrOhCSh2&v3z?gh*67$A;Tg^G)RKapo9!S)T&}W_iTJ9{jjFtmOU7t<-bO`{BsL4u zC%BvjX%fX}iW72#(p-p!K#oxyCNtyQg3c#xmSx3#}#!f|^Z z#OyRW6(aP@@3wCq>k><`B!i#u4xo$xQivAux1BC91cjXg$5IO-VzrWl6mde9ATB55 zYOdgFkvPs49$OlA*%A_K4U4l#V)DdSGQ=_I*d{bFH!wDHhtaC8>}$QfcxTadd%;!G zGkh>9C+vzkJRuWuA&Sp-n=mO^uF$vw$b~SzFiKt$nOGDClXNw6Lwug-ym5y#S7YyP z7;~W&G>DM&_Q8FrMFE$T!i3ClX+cz+DdJ+Dl_ zsfCJcu_8wx&k)KpL@KtLw+cZ#Z3dQF6hplsg6cUTurJM&wuLCpapZ znswcpA;zYtZ}?~~v{IS?XQna>RF)8#MIf_1WFoa9TSt&!w#pRtLJEWomiU@C{35> ztWX^O&?%U!iZUAV3JjsFTqrLMRhUB*<{(9hP+cxeG>flf9W#~}w2XC6!0U1)+FCa& zbE4G+5={}d6evrA;>;llkgQ?|lt^6`rm#Xzv7az$DgGo(u!#R4X z(1<6y`}!G6V}ynvNTjAzEGrho6-W|HV#pT&jkFM|O`t9b)mTH-rJ?GIFl9xk+!hjF zC`l;XEX&Jkz0o%3YMgc{YPvS*3Pq{{iNPk+m#s@ML#8CEa-pz5oU5i<`@l_jT~t<%L{Vb0% zQU0jAbm;LU`g6URtVC*b!Z|#(h+ub0bIZH;EG( zM6!B`ygEW=!?YJk%@VaOOj!nV5T!0Xo>g1abZZbXyBU|UvSp(pJyM$&p(z$AEnzZK zsMG|64VxvDW4Bg8q9rV$I8tSo$Zc?9g>glZvXan?*%7h1r!#7bI_|ndeK}1|ZxI9;+%!L6GsI6g|vQRZlSd{=KOlA+f3Qya7GN(SR zd88BI=BD#%d1tgLKU!}MH&nvd1;|SLrBD~eVJZuZAWWD@TO=_QV}$97!gOYVzBpJ@ z6d=op)~27xGZpprgQB2NBUNou>7tj9V6%WtiO+L@KMIWTo2@3-hXa zk(mObW{*t0S&8U6wB|dr05_`g+VyfHZhDB^wBA@55}O|_E84CsUVk|!@@h`FG*2kU^EL^U zg%N67q^3-wsEAb6iDY%+#LCSHMfn|Li2hcOPH)j>M;Y?M^tl2f9v-f!3_HXF7Af+9 z=^}Oc7!qxEgg!q|nh~lk4AYm0lFI{i)*xx_Rz*hZ1T`W?T0XCy<=GRSDq(lC5f89? zEX(?-+R%$dDwFK=~zDMVbns zx&%@nmKVb63**a0iM1k`U7*MdQDi|uh;`*qQtPFb%~!Lsss_gvT!S;NgZd(%^GID0 zVP92|SZ$KXaw24q!6Lu{yhg~)(W(-O)C_3~yjmcVS;aEzdUmBC`!c%mZ+F~ zO>yh+JR&cyQyJDMb*@-%hVlwl7Xk)B)nXfQJfyo=sLYqBbHX*bxJJlkup(cQY!e$x z0jI=?`RfzX3fixYQnSA~lzGs^_(GR;_-Nt#6f*yX&*ca0`h6`4K2D%N#pjrp?&VrM z{c(`)1!Yy+)CPA5d4lt(?s1M2u}YQ2?sM}69KJQo55;hwnpSx{c|@_^%#7<6Qjr|4 zvxU~+M7_{Z1Pd)tRRCebo2DnEv*O(R^FaIkeU~+&VpEp)rH~8VsU&?bYk&_*uu!mnURTU;j(m*JSR$59H}Xc zP!)x%tYW1-Tv;zjtP;!1qvMOxs)n$i;^B#1$p8}t61_P@V-3-j`N@g`rG?M~5U~h# zzF3(Rp~)7jGsAUxq3YZ~C7^s+sNPN+OEblBT0?aMmpG)GPo&L*t8H( z6mr|5RT27DVPZwZrL2PXTUbWE`qrJuL=7B7 zfiWvomjehQQ4|GTNe#SYpm8kR|!8JL|tHPx&X5-)+o4=Ex2SBUA75hOkwd!K?yql#BBI0 zo0Dsz6HVe%`qZ*6fUn^>*XH%F{Lbq)TrNXpwIEg=rc4137hxz3j?WIhoGiJV zB)XsuziNay1j`KmN~533=&wi%PACY8Gl3Tj*OUsSHsQq_RYCnNu*&nU{hAC!G{ng! 
zQBnbXj}S#};MI)K3mH-I`JtDS1aV1`@+1UILoXWFoz(}#qy;2qLWG4{3zT?tVrD@{ zKeDV)-HLFoxBtYi_z*s~uX}~j6OeXK;0N#o69G;(Ke^$C;~c&)>yOYZsvb{XikBIa zQ4NU-BdR4vg&}dQtOV$O0^|Obr5`pqPsdAnyoU)q0e|>5a{OoJkK`8m$uwc9ruCVP z0+k&>z>t`9M6QK#8IsFI;TMY|FPkJ+a)mKzepiitvaDc@B}`eq!Po$Kk;J5Bl%d$b zH8|@!q)8UY4PnMSxR=4|{4jMvkSt@}65$6fY1Z-uvrV%Wo_-bxSX*VRLu>Zyem0qY!l^9JBBDlnGhM_|OE*S(d263!G z6041t8H6!PSg}>-mFupi1j&k^ZKBia0~2!~kH#7VPLZ@UwLs;PXtU&A56f+>JT~iL z+x6IZf@yOqu)T{2t8-pfTj8G=CrHlNm|Fp_;)u27n6>tFS=E`+s>>x!=L#FonVTVGh5Nnqq6gc~{>Ui@2B|h&OCVDGHG%?aH;DE^jzv zuRUe0xKvRSQ&o4_UUk-FJ!i3>Dk?owTz$5rVXM&`8k2%l2E=YRDe{r#in!5bvneDl zL1HWjPq9WBEJrPkr_D7NiYwwuYA&1Y=WXSu%Is$x!lAVnJ-~ilx?s(!Ch_u(Ue>{QwRkrqadZD@Kt0_G@ zMM)t4m};Ier_t;H@7X$S{AU`~ln?y4E%Y<$4}HwpdO9*OgoQ@^<5P|i=i+%=aj;q* zVaV8;Y1^1wu{W>zR7v~U(x!{H>PsaxmrQM!imqKT4PLSIT((x9FReacX$n&pZA>bQ zl;(&|s|+Re$j3x_SNIhr^bROAp)PlQy5)quI=5=(`M@#bLnYw z#i^3|(`B6-jHL*&BIt|#?A7F#jZA}|xSWS1B&kqkh)yclkYYZNS8)L2Kpel;Qg_u- z6=$i8DXzL?tvGA9pQe-&-<*?*Tnkpo>*7VpfsP$+RQ zBO^er49_$HzOPTQAGbCiH&>stSDh)TI9phEp|ItmsTqdsQW<2V=6Ge(_M#fG*0#Zb z_+w$nd2On-@h<4VIoB?!J{XqXSSZqEZ%ivVQ`vH^s_}Sn+4-`{3#Aq3t@e}o=3{y0 zlZ7_euPf!PNAfBKaVZjM7Ffd#iRl@&9hl+f>%-AXRft?4o=p7rhUCIimgbZBHCN4z zSBvW|+bS;D%TJe8oU~V6sHBrTSKhp@pbnwTNMkL$j?l}=Nw#L-iTu8Ph;4{Ww>}xU zN@*K4rgOz@7tJka^Q+@54e^$`L`&6$Z1dS{6QuZ}x#mh~`<_%gVw6a*2uT2*NJ*+~ z!+y~G#_2@aET#u3;d|(Td%}-uqV#F-1Qh+u^y2s&@Whf!>tAeHS!Ql(_oyc=U&{V7}5?@VDE^WbmthzZB zaaARdrEf^HN;K(s5-G#ClLv3;d-_uP2Qmh)r(V03(c72NH)!m*mfCwGePHZlQ5C3y z_38p4;!N>5b$6W|j)gNOup?6VKq0z9iM~kIF=D)S9mmZdyph_`mDJmvG~B1|?^X8o z>xOO_u8+sJ3{yzOSQVu&2aS=}F@z-B>RTg(C-fOnnMGmhj59^`*`4FLJ>%)E!+D)I z3$NWO>b+UiJCb*8IQQC(%+4EzmcheWrJGcF5s6uF__oS3@52++uH^INCp;0*kCV)& zZG*jd;*lrBJGqf(#^U5?rk6GrpkCfJ9MeLFF^Cf#1q~-0D+pEasW@Dq~cO3 z!klrDh7@phQQD#l<$c2bsN*KP85}Ab?l<>#n|eo*ny#gG-#nCWM_^H?$U(wslr$4~ z0xT{XRG^9G7aLE|L}g8#-R+z?r10Vi)-j!k0ylKv8Qf2JutvtLe`i$cq%k7`0{f5VykN>(MH8pj;<3=0+stA23y>4^JDrZy~udy>lp~ zrzh`o@e_js5eiwD(zrRj7o)648&E5} z;5uU|C0{D5EIhR=Se~;tuUrf+>q-oczDsY|X4FL+R2$P$)~BULr=&+EZQ5hD2CVNk+?Ltw*K0{IDW%Z^w-ELLtK||)@r0k9H zD#;a@*`Ro1ZT^=aFn&8r6He4}L7iO0$E6;ZG4%#%bMWmqVs3ALZ>^vIq{)>6q*yvO0 zw|n>5y#vNiP)AIxUVN$`J6IaCAvsl|P78@i*_CeJZY+pM(C&mZsf@d1nR~z=NDFt% zvv(`?TeQaLRP#qyQ?_Q+Y|xs*&Z<<_TA~CTj&16+NPVuvkRPbZib%5@DsA4GV-1dx zMk^It_3C}8N&C~1x9T;*_yn;`w>8ZaCeIF$W^YNYfJ|-Ey$`-ADlU}>opxJ6LZr7x5R_7z*Qk+&BW++Dvd_r+G zDo#Nh?lP-_*+yM<=sEe;L|l>Puugwer`!>Db@LTj#6>Cc+d=1s zYx2XjNSrD{N@hX#;3(_3#oHBsf%7LGGP!P_k6izInG>E_?UVKPz!yKOU)>rBi78-I zsVg+N!HId-!IR3Le6;On%lE$elT8Xk9d%x!md*e>tW))U@ZA%!^*ND) z=bCnaoQqQluc&q>749?^hF#TfPqA%HtK6U|)K|AcQVK_IZc0iO8?)DEmxU)4hAGmv zBo&}$WV<|VuP$S&&Jd+ltxri3>68(AWw=g-^8uqDAkEvH(+HAMoS2HLQHX!h^`Xsr zL%2S3Q<_C2P2a4|J6qg{au4AJ<;Ga;E_w2iq^y(a`CFx$^|6Yu3yIrxdEp7^;nJKP zDV6Zg#fh28wOtHPpm!Wv;>~*VC)^6l%}?-L-TQtXkDuUp0!{1CQ4)A!aB*H)TkRj8 z5N5~-QDsLZTcqtblKSst-ngALe7oTKL{{H;#=vCe&}8ArMD~p_!|>R-hQ7$us`aUL zsFeV(V6b-&EV|0?I5(=Z5HSril!hCMPgnHl24;*yGrDVI#{Ro$*Qe43#&btzv#-yj z_D^OE&ZP{@ohoh%m!tHh5TzfH>f)MdSCi9q(p(lKHHeeT1V(#gQiZH@EM;gieP}fI z#zb1*Nb=y#wA*(LBe!)Uqsg}>jU!XiuF>sz^^s`}feEIlgskkg;l6oS-Plx=JPoHI zPPUGGq1qd|;9RjORG|phr3sbk$fk^I zyp=LAgZZB`cqeyYEWd9mzi%$TcRs&wCV${o=Jh*@{@D##4H3q2BsYXzG#aYA;L3MS zK~WY*8;eCcQ>f0otGHD$z8?EdMzp}Fj#>5S{+X~TEYZ`{rt zzMC~LlRB^vo?IzRYY5g?1t;VoI0#VE?Q$JT%Z<=vB3lgs#GM6Y$s@A` zaG;{$J4M%TzfY#Ai)d)fcastrdz$x8NR_uTMz5Q>zf9U0(Y*Qpx!kecx8@;PJ z`X)j9iZe?leU|YAahwh({9$xGhL{{k%|LtsIX!*}83#;_86&eP{kKvFZ)6PJ%paaA z9G=bY8_yfOTX17Md*rThc;Z}L|CW@RO-a?ENUluCG?cc24``osp~r(zT`Wqj5Ngf4 z^J+AGQz;|!x@+UfgHy0ynHaD{{S)bf)9C}V>DQfCs|F&KkhOe~#wwBJsVmzdDOm$I z!ZoSE>Rd@msl-rnsJJ`iannOd_usdP_z#jgCu^_lfy+UgK> 
zHL_Yzb*3%5*0kj2M z?98aznql9TYu}JjvNgMWdv48!)XFfGO`vUvGB$_D=4Q3uxi;^r8Fj2zsMe#_3Bx-IGDn^Vd*r`mSqRP8IMMZV0g+-hjyU4>0iY1Lu6(m-WNu&iLSGOu~k zgC}Uo5v=aP;~k$Dp5SpWJ_EemsW)@N`-cCCc^eP^@iYH~L4$h-@@OEwLM<}O`3l$@ zww`|N$KvgWX$Sk- z>vU~W79csADmk^F>*}`^bU>gsrr399)$GZv+>u(kGu^f?r($<@<@Suqtyy(MnJTR) z)ka48Ms-g1z$kFQ)v{{xds3T1bWNe!Mv0+ub4ufmwE6=%&3iMOc4gM%yW6tt+q27d zWL4nvZK=&7LtUt`DL`El6jy}wh-(Y3E|=?YIx@l_m?cQHj!Y`qmRY?$t!js%bZ=_q z_Oy!andLjPY&+5{yECeGWY$6IskTK|D$tc8>n&QEliNXf0&9QJdZ=G$euCtS7ItC_ zOc9Cf9SlwL>P+8dF5RJ#Cl~YJg2>zVvXzpkG4uXY#y3CQ|Kk-W-}>mpJL^up`tHU= zqZvLDTCaK_4(CpMRwH6af)D<`r>N~$06S0StC&Kxb_E*0PR}1q%@L|Hf>Z@UjV(H< zYIjz{uJp>iS(W=TtM;bW?$lT8Pp#XVQn4?+97b(>PTj`zIYR2RPNIUuV0{IODJ03&Tk~4B<+p4~uicbhy*;Z26MlD2{m!gfOz)kUjj&%EldHwb zQWRGQA#*v=q^s^5a=G$uOm9pgnH3mn1lo!SeL0Ab?J4#{nYBlj{ zJ2NYGWmUk0ZO^nL#Ir4@a%)a)L<-7EOHoip-5C;cj8zOz&?JQ}W~*u5mo33f$*;^$ zc)2bV^kMShRvH>RLK8vJ*n^dJv8GO>szO=0KvseT??_oicw)IAp&X?wR2ZW!Lk4Mp zx++B1EYP)uscTSPp)KnhAUv@cDa{O46$j}mgS1r`!J5ioO$7>P!*q63@&v0ac-~>E z5}~FHBT#M)LGik_2CvY$>ow-eNmuQx>vU-YQpZu18>Flclh+FswPFoQfaqi-3cE;A zj!Jz>cGVP%b)_iA4MvWx0r;jlD6vX-IX9p42P?N2m@ZD-_!<8epde-;DCh6rT}?SNOJR9d0nu!B}7$^4ijMt1SKsZbvX+0 zA(#+^5H+=Rfrk((uME}J2WuPG$*TinO|h1CG_e3KIg(K(l$ohV1@H~3^Hnw|(+Pq1uiyPnh!hy?M6@+ix1N(zE8n#e=??w zYLsXJWr!bNSeh@Uz?X6$jLa4^;aQsQm8U&_;v0OnuXGd$8afwYkrZ|&NWes_cGt<| zH$OV?=8BWc-ar24ies<7x9!yrBI1k%2#Vm$-FKhbmXd88Ph^8(~*A${@fU@4KC~O)hnS4g@1d5}W ziDqled-(}+jc65l&YJL5nfSt!PdNF@gJZZda#JR5Ck;!<(`{odBXO?CDBrs0{EI)0 z`q|opFa9k2;5pUEB;`#oUW4ZM>@=`5O>-(DWk!ajj6G{V4`p?5AI`@n^5+BTg$!RT zX#qEePHQ2`noxBE<`u{`_#c?^Sq(POwuWh%fS!Xc6(p7o4#HuY zb8W>Wm%}{QU=Kl>Mu7NGb)8V#5UOdyfQ$$;fdB> zu)zziLm8ET^$-@EKL*j0>Y8u}rl#5-;6vZ&udfNx*8+$Hsv80{wE+ei*uPM2k4`Ks z=omtjnpW*#o#DQ?cmfX^>rf#j=t_L5CdH;mvB&946O+m@qz1dpP$o0j6owKEnURLn zXu~J$wd_f}Vz-K<@_0?L%vhYLFO1jb>$0k@s4bzJufD$gz^f~cyteYh%O4$jYt7+h zE4IDzet2SvnJ>*ovPMg4;~f&6?sr1m;GWixWl_p!4N|aGHj&Z2KzWph(<~?s2vS#I z#zKh19{UB+18$(9Y7i)^h001;cYz+}qg9~l5USgQ0np=ewe~LL|FuoJ4(KUT5C}W? 
zi|v=b66P#KOJ<%NW_1O+Sunr)8pJklV0x3z2AdV6Yz|d5!$jh*@CwizG8~3XTN|pa zg%AmK)$o(?xlmOj(zb+YI)ha$z#r%}mt0PLs0s(CwFlwVBoJHII-t2T?45FMTICX-&oZP-v;)8NVF%7Cs?ZhgjT3kwzj56keCO{8mPpj zHUcZ*sgUY{+QJ>dz^^L0FSwErLt}uUIY8SHtmz0<)uNk?!QKmeS8>a=p4xu`PgDi# zYN1)!s17ED^&qP7#S`Ev(Df43F2@rns|%4A3e=6kidK#%$k@~Es|S(@)wIxAX`v#k zpw6IAh1xoSwu#}1dePPVtd6^cC&pZn@&bk@@Zf1nzVQPCPZuLZ#RiZB@C1O=eRzV% z31IZknV;ZJ1HBSmjNVW@Q-M^0hn!@k6TH(DIY>`*c<{;tya?PRO3R@IPq3cv!Xg$_ z!&&Y*8nf(!tuKGD`yK!NuYDMK=)Cea76WD(!KBQ<%}(QrNPz+4;_{x!c*o=6iOZG_ zFnnS#0rGkPbdYC|b4*%b)?hVeS{(!=P*>%rtMu1b1sdyu_07<6nAid(@I+ohc|YnY z2v2CseDFkbh_02;iMBaF*MW4dCeSgcV?{8Z6-lk>mZ(R2NdiJ`JfAZvBvGX@S+Oh22 zjc+gC_LG&{UVS&>wGTGDy=upc?{9r=^}&}{9DHN#$+rT|yt?Y(YpeIa`2Oaf`E8Tx z@{wRMf#4GZ!Lz_e&~fPJ;LAt47mj)+@dO9&UObVC4z2LriPDCF;PwmCI7rhByAYyo zhHfXhQ&S8*#6WTsrs@)C+VK^1u-23_JTb-b1bqFHt&#;U%L#}nKY{EXrqWO;n>gpTk;0ZuDW zSsg@XwE=pHG>fi2NLxcgUmXZ^-1oD{d{Um!fl& zmHZ^wd-otX27Si{+%n=nF#xH6t)8|{zX^qT@;(-q9Y;VNdS@rW>dX z625o>T9UbC4REJ?_=#r&PjE0nJ_P}DC-Ho4*+&!?Pm;84+S50sfg%(eIz@zaQ zfDRa_nnXW7ilfnT!i?RG+I9Msk9Pd%gWb!P?|E~@_7{IHJa9&RYl=>llF>b7@4P$3 zvub8O8J<9(a={51jwx=7Qj|q1&;hd!z7YsN?(vh;@5K}PNq|P<{<~qTK;JK@JL|L&?m8pMYk>#{T*yKSQIxf#G#G|5pXv459GgiDKjr<@8T* zJOS?wG@eLS6RxNWS5%?rH`;L$rU#PXhy((qB^QT|qHcs6=MII$<{%%_gD1)mUSUig zIE)%F5P`ZH&Tv3!U|~Wb~R7pZPo@g5(Ji#s47aUJu>YB?tmi;{BC#$x+ zwPx4rtM)8gweOXc`(IhH=am(^meJVZ8N2u=Pwo|B_1cO9FTS(;)erW*x$@w$k9NQO z{;E0jfG=2yyHV`{l30sDp?BwFl_zf!b1Vfk8?2!FtepZGasJ zX9Oi=sw&!$M2I!CIiA1_=Mv##x$d)H0zGg^g_~bT%u!_rnh8(H(w8?8o){e6tVe5n z&QG+$4~I_+(+SoPvJgNtUK0pB_`o7{J*=AtPjmu>Bea2KdXg)6{>1pUq+&GG2dw8* z1h-;tvjr(53rI{StSUzv_>({fh+oV2i3jk6o5}WJRNXr5;ke`D)psz0i&mt-)B)cG zenMB>i={MRYXg-?eE_*i{u3Sp=*mESWuU%1$Y4h!YOtnZAVrfa0+MP7WNE>jR)`bw zDE@R0PhbfEkWP(01H1zA8@TbNi{}vs++REi1fnFWvIwaOD-Lixai)~;1O;5=%@PIr zomJw{fd)%3TKmDHB1l2c+k+>nDc+&0!R*0f6UC-yb&VjWz4q>Gv>a`}i>djjwj9r% z0xLi>G}z?u&KPYi={~0A@i2%_mY3d-Cn$#a`STO*r|*__Oainz02^?3#x;&HN72(U z2UdS^jn2^h3wrHgO$ZAAN z#0|-xVDX3s=ofke8>}mbj)Rm1>l;G!0P#?TaQ*E8S27bSx-o~_=UrQMq)P6)_kg89DBh8u(>Dn`kx<$!QI<$FbrNY^l&m32 zUL}!nJOQ0Z4iiKbAONZgCkB_!gD1+6ArTy#5o@g>Jc0U-OdG=!;9VJ>fcv4NP$uQC zG1W#yqsA^)*CJ{|F-j(45H}X@Fcq|qAl^Z12rx+TtOCbhKKTf{15Z?TzxICkPuJ~y z^`qS{E#LRbs-rKhKKjbqHURr(lRsZA50#5nu)cNf&yb*Zr^|fd4*B5^t{q~9tij-0mgs_Sa zmU>B|cORa(mtFeH@5)o(0#j&V-W~5q<#?h7gZb#3{{xI5B;l{G^f#3G>8*jfk`QAV z{9#BCqcOm0mjxxHd-23R1J=U`{*f1c(Xz4P8r^ww>}V)HbZEmt5J(^D(z2q189= z1oOQ?RpOPb7&l&E2T2UMqt0X>QJCs5JqV0yh9`>d=O-S*6YglWH>2X-6a4geg2y|6 zC$MrF&I8H_DhcKpXNKoNVI*A{0;|EG2Xw^(jY)t^!2~en`FjkaEjG)$S9<&4)Bw59{9#Iq?#T;8!1c z!|%k8KG^&Eiolm150Ae7~P;orb>A@3bT0al? z0GoO61hxk=;cu)CG7y*u)t6%M03zjNDDBuD9elm~1mOuqsYKl*No9iC`^BDBwHAK@i9QIqcylD1|u=c%lkfW?no21_kF2NI_DguVg1cSy1E{sI5ph zLq>&I0at-3K8PnI{mY0aELB4<{WScw<-3rY@S#MuZd=-#&8Z)*H+;B3zkI!R`3Cih zXzhw<{qkr7jrI8O2@V{0^|sVio02|^P_2wot=XVnxk0sRlj^;Q#FbmMA8u5x+@cRT zkh^M=;jO@{FRwWM#;QZhKHC2JhZ_@Hdkv4HL{4WmTJ#{C$L0{uDla@DE8Y6QL>y+1=b^>r^N?P z9CHtBPeBj@dJIoM=^!1K_zA>2kUvp9?W%RUPL;KV$joA8lStYeA#Z?g!l8p0KS8;i zoM2%51V?8N;fWDXydw|Dk*B};;0em)VCgpWNcac@bBnwXAPGGwN(0}{>k$2SKbMK>1WZeytnzqceniL-Hk82x8XZ5!^`jO`qv+Ce&OdkURl2XwRK1Sefj=Z zSD#oOA$#eAW3PUA3Ki1P5sbsE(@`|!emp_^M5n{GP0R2E)E3|}B7lf@&}$RTTjV{U+?qm% zcGP6zl{2C~cmnwoJl@d^Z7b9?ixZopq|H%SIZsZjiZC_w0G_C!{R3NQ>mZ(BUjyT_ zuG(sVC)yZ4fu;Lcax{qvN=}e(!K4xuAK+svk%EdwGQbm1rlcqtp77--P~U;@95Yy7 zV15EK6b2Z0;+3CAzOi!Oiyt0Y5p4+CRj_hPX271pfW7&Fdve*po5H}|CK`D2#0Cz! 
zZg;NVo;<(Z*#WzALiXkc?#jRj-k%e^KhJ+pcF_L(wL3F|_U7T&HCs}C5)${?>Ql>B z9)9V)?XSKUtxhi=o$(Psjwd`M__HaFEc}a9GB=($nu1j!QJhG40+t>nig=}5B1jE_ zfLbI5l>xhht47+8P+v>2234C_jd%y~6J9*A_aQvN{b90nZsSF9d@icQ=Rvfw{1_^* z)K*bqmZriJ@6cgM()wUIx?UoMg75@farCIbqJRi52Iv@`(8I`9G4q5JQI037QKsR8 zC&qRdOAspzQ`FE(r1$Uy!idalY3qFOL?sEM9=RQ#2cBSA^zd?LXBQnFJW&i*HdJ0M zRMjCr7U>H}!=$?j^u#j{R25ht1GQ=bJ>pNzNX8_%fDl7Mp0=VB;oj2G`3QMdu(A}P z2j=&<2XvO(a(gp0nI0sxg<@GP(%`LFbP>yER8G69v6fO<2TDN2$`(;#dxWf6A}78G z6M|ru=3a>h%Rb|Hf^YzfcT}?ci93WR#!zfx1{Q%kNPG!gLwCG`)3BHpbb5TEP>2hu zbCfxcXg8KBE!}#rzT;WJ6FgB7;Y^;L02`cHT>s9xZEq~!w(P^bZ>&7BY~|5cS0Q40 z=+#vRmOa_A@72|N*w{zMLdbUIVLo2|Xy2Rbj=a9+&`Tff!5gH5y|M1dt7{IvwC2Ez zYYx8de=c}O`UfJ_n`%>)D%joPBk8?O?S)Tbb_w&i|#4PXx>N|ug zs1NukC>E>gatf1WSfWlvNN(#9~2;hN$t1 z%uPb-yuJe>i9~U>ekn+Ts!#+$*6Dx773F#B7lhe3Di-C7|H`sDUA{g zN}0rdfbRiEsJ;VQ7z!8A2kfzbT>cenQ#Z#G8CaU!hKMg@gfSV&lCEUz2!f#yKACN3>8rPA`tL4S zu{C?`u7Wi?=^R#XO$*(Z7q}z+?Vz|&6q5s#)j36JCCzo%o4?=^X~R>$L(|J&872 zE$|cycU=NcIHgQSmi)x?C#c$y*w9Dt6A#pj-dCdktMU^JPf&q0@C52b89z}>>FClr zk+OlFhpI6Q13F6uA{3DxWRB4DpmWv3LqeW8@I)Kc$t0jJ7}2w3cjlw8bQ|Fbuou2y zq7gdF_jz!?3Oz&UNoZ{#g2eLtO{yHuPXJE@%CVeUOPH)RT;41Oo}e0ya+Xc$W}@h+ zP=JF%&cGAy=rZAn?5xgFtX<^A6L_5L5&-d-n@EG<2@h*Rl`IHpBW9)skV5u*14@>+ z%1xen(PsirkO;B0z8RiC8$2gs134wl?*;6BW91%{guTAz^s==VUiCY-%>V2%|1-<{ zPQJeGNe!fTE%QJ6s{c`XLkBudW9=FK<_*77FRw&s;}lZLU-;cprqJi+pa8HY*w2)<4;@C2D~A3Q@Jf(2 z3IL-Ni|_=hxDisJge3Br(4z%-LRN%&QC{BxJb|bma?qe{C5i^16O^X|Pxu=v{EXFr z6fA49nrkc8Ifn2APW1gr1hcmf$A5Gal(SW;&N&^)DYQ~Z(Yun-A` z#^e;Bx4z>Zo}g8;zwrEowYmqE_{VGaz3{<-mFsmudkfZV&jC-cZcnz~?kqN7*Yo{$ z6|#{}zdo6PgRb6_zjAN>+P!&ecW15Mo(?qQzb6l1V$Jpf|2>wqJ58&%7XbEuhJ%Rki4-db&`oLSuH@^L0m^vwkSCHNdaxh7J2E~!%3CJ3{ePZ=gtXFG(i zpL)z_y?BDrc<%g(R#t&hB1HZK%f~Dlm;s&;N&&|kDK`L3n+Wlv#FElGsN#}^CMo|E z7LBSM$?d21Q79XE+IWH#AuZB|iXo(s=~I1B`j5&X+Ss1FDYa@e4B2hg!5WpQ=D=n)iQCA3K z^D;Wd@KCEp=Qm(&6?rKxN=W}!0nihakriOc4B!a}W?UK2 zznx^3iaYAi-bW;_K$I}RP=zE651yc`CSo0srNI-EK~$)!?%@gavq}f;QA|!Aya-$x z21J4ysI4cjc|bp%0X- z+L^y*XD*P$+U;35AOGD&>vk6T? z{z2pie$j>2nlB2TV9$u!?KoVs<1Qy)_ra7>gz!R9OC_z7D8N9FO{JMk-Bt)pFhj;e z`AB{Q7l;U8i$v8-cp^4OQ{F-K9n)B$rkLRgveqnZ!X4>wyT@*>!c+G_Em%;vK`EK& zpanY!o&%9Vw82Kbs8HSzBI^(;I`M|0jq2hst(CFzgeQ>8jQVW~2vR;~qe$B((4Z2e z7R_zNaTzJ)*WlzA4$N&cmI4_tUzh9`MLm#}Ng0H2P8v`Tg6wIQctFKjNS|W-1j@KpGH?7@9Rz38R{)QeJmhi}@1@5Aua%2P8R4klO{ zmm5#ijk}uWIG!*;6H)OG;Yzo$XG!t^6Xf><7@!Sn8J=i>7l`sU($Nf0q;-zrWC%|v zu^5Myd?_V*bFt?^eYAK+XEg&lgeU0qSZ5(>6~gcYAmOu!C)}x0$j78!{UDcsCq4+= zvuyc3kQ48URS4WdovqoEy>>6tT%^mM*dWaay~&40a}`PomwQ(l(F46Yl&qW)_(nhd9)Icp_YhZd7(? 
z1lIcmc$c?rV0eO2l$dxV2+%AXQh4x0VmSp6J^2#|MDl#hI#%C7Tf$)i=f?^%DO}1_ zj626W2w(!OwA^@Ng78EsctpYz0=XMch-H*>4d#~cgwbvFsl-D?Dwz6NA)`%Gg=+Y~ zxcsXn&6JP1Nc}Mxo~Q_?VtOy0ASlDLE*QMPuhlFPOn8FmLog`P@=b|m^q=^0;0a#e zVX40M;!g#ye6aO}5BDMa5{V0-ADB3BFhPJGqT#oL;R(0AaH;u>Um^8C605*Qu%HGY zJEcnO&R@MX!+%%)$}QPA0mvQB5U?I#;-e_#E341_H25l_ldpWZ8MULReI+RG1$fVP zyo1Rb880uM*q=&x0^~YWH^UQTszI}H13_{}vc@tM2v4BF5^Q`EXn0_Kh9{{11j7^5 zQW?dZ!K3j6@DwttBoqqecmkS&;|VGp4NR(h7*EKndccT+pV*k_!4s_cgeUmKE79GJ ztzgbsayu^GhJmmU%b&={k{RG98X2BICsAl3(5;N8dIX*bG?F$XUrqIBJW(+^AFayw z!4rTuP`kc(0?+jeg(ny)XHriwX#4}7cxT-X^od%w>g3Ob3aGQyJF-^q&H|qBL~wZ= z_el(&U@!%Vf60K`v~G7k5wkn8R&A&AA?{>*`pPKH%O4(j?W6tg`t5)Dry;uzUmBZp zO+Skn6rUJREIK^=gzb~y3G(R)Px$f^*@@*|egcZg;!`i`?mq!?;yygV0u7#eQ9Dti za>P3*AM=rT0#zp*Pk^GpV4#GNfA{edkH8bi$0TnI>?^gDW%X$+KiY#Q-0?)pRR1_U zL7iT|*zpeFiK%&4S;KYk6F*tE|E1*zV4)F5071xYcEBDaW@ZNM#%p%auDpQV80_;W z^Xk7RC+vs?s3Le@Vc5Zq=t!S**k+E7^0J;m1krPUqWO6FP5$MnGyiynTWEKF7lJOKw1oImC>@C0IT%(+D+ zAm;)pGS2aY2r&_a4DQDh`;s`G;LVfnwV-etFHd7e=8?Md{}4$;auTvZe*vDLmicZx z0Zc%h>`{0EJVB_P@e^SB5%HwH7fOrp9zQX&HOY!T`n*D$I!yTD30xf_k`z5@WMn79 z6XM_jl1jRc%`~>k%G{9mZ?Ur@^ro>%5U0J11{scA0{QT=X z+^)m@c*4^BzW?4=KiZ3I?RP{<&QGk~jlsjWPjvVMgDFV-%f_ApqJnqj`0t|t^Qs-` zYj$J=?aTGwmj0ICxi^ta`rekGuio_X+pAM^ETCVWJu3Wh@kEP6UV=hs@DuKM#{>KX zsQ}_9eDDOzuT)hAT?U?@epWnxf>!$Q^s@@qwX&{O6hNd-lxXb63MeSp@hCiz&FVX1 z%UW4JCX07Kx)pT{PoR419-iPGHhu8~{2|Uy(0X7VeuCo(ma9R{Dmb2C{j88027ZF! z3Cf?Kf+^N_j%AD{R&GkLpq~}-6RbJrmmq(_RyX*sZw9@&V&@C*?|oMgyF5zsUby1@ z2<7{c@(&`Uo+1ArT=_w`nt%SJMuhTRiTo#F@$ZMrFg_3`d?-o$KoXAz6Yogk-d`_& zCqlM*n^AJ4Xw9bN*Zr=%@Zqty)*Jyd@%GBirkWvSLsJtxBrs9MpKsnVtHXIV)0tRX zKZk2>+;tC6pqK!Vf(U6X86YZuxDQV-1{zhGdWI)Z6pet0vLYzX9q(wKarxj0Zx2S_ z{?0!3i%b~upHK)&%Xt7#ps%B>sttB9`#RzsjGvIVvv@}nC_SWPB43d5vYDriS`2j7 zLd=ol2?RTUCm?RL#8rHHN_jVncRY+Ic&#PlCkRi}6P`ed63MSRZ%U)GqEdk^`6W%d$ZQ}G9X%r`DSTG(!~@A{_sa9R+QmY&NN@%}amm~ykz2x5Hl!1I@dWRO z?UO%294thP@e?Q+eGpHu{+MhJZ2gWv#7fP{~|!STfX2`mo+3pbu(cZa8$ zU{$whbzue8?{J{sb_e=dQPmk7DL@L|YYz*Vviu2BUIrqIIG$i7bL1q^a;8}OG%hEj z;|}sME5>G{Rk@+cV%FH4^!S5#!rLMf`|-sS$k~zm){8zH{kFY%*Z1Rz_x$#}^1)8@ z#d}Ar2;7%PHRwp5*pmfQ?Z21k)+h2x%q{BLiOBW5Qgjx)tM4m*^$r9$0#FLSFK5+` zwADM)*_-rtB2>%PoqKutVHEMd`p)_-`(lS~E>5Fc_$SsXPjSP0{V_RoaQkf@_ss`S zI2oR3h>#bdLX6`H-htkOC&=(~I)UQ}%FIGPD>t4%KBjNKZ5uHh_u~m*3!dHV%|_y# z9v;9Gkql3CVZCxY_z5e8R1|2|*#Mcs8}i}YTGEZMDQv-tMR`6Z=O=<<^W)h19UeS^ z^+gF!(DJ`pA3Ql5X4~r@ZAX>F z>#GmHx%Ma(NUz$DB2v_0vVk}I@yQ#jK_c#bLa*2@#*fz?c=bbeGAp*f^xkH)t9oPY z{+H1ra}8>&PJI-e_}bbNKl<>%>uXNE{Qi!Qf_E3#S`g--p7*qp4lmYbV8RDapd0?P z+;1Drc(7z+7snHd668JuQgA%M*NXr}L7W*G8rGYD80Z>Wep5$50fa@UUNkO?_s3-X zM3E=nL9LP=5wB+=<0=29tUI2P$^7a9X>|wj6GIckPf$NAtX~Q|fr0?)1kLj$`2te3 z@SdR@c!Em37@k1JA+0}hAD)=pnvDEV;0cl`28j2+*ow!>w7}tvZ)yk))&QkXlVXL6>dz~A$Y-BP#{H|8f7M-zgQJ% zK(=TQwem>Jk=1lUZJ}Ni>wz&mLEWxA*$KSF!(HFuUDt!9Pcra`=G>&YED>5So@fh` z71G*_9y~#drZ7B#tX(`3*0qDx^iyN+dR_(k@pu9yaMaiYgRgtUDGG1zZFf@Kk( z{p*fQ+9!X-{&%orxnI-P?M(CEL9hHA_*lCm)o)J*LNJu4wwns{-;Go+^FROU>SI`= z=#}?2tlxckc$8&gvgN^^B2f5*hx&wgf)?xrp15MJk5CkZt1Ybk{yjXw;)b+%59Kft zo*)X)gD0pe8;XhW1gq}=Ke1T{EeU=Cp*mWjhVv7?c*3(7jk_L|vX!CW;KV4*zIX!7 zFbE<5Mu6}`+BL%y428H?{dJdVuwqe9eMc3o2S#|J3Dw|T3$CN-e7$n2kYd;cc!GKx zyPL~VhPjQ^Paz+Z;|T)&_!SLZHb{%y`Iujd^*eA8Mb?IQmWu!>UjHEar&z+{qb-=m zZ!X{Z=JL&`z{^WXl{)tlZ}9{oYkend20 zUAgy_l?UEfcO0#0@rG9PSg`<^KxV(|oq%11rLCNn;58i!v`QPAy8=u+uztt0izna` z-;XCOsFbF1DgxiME*%!{V0gk^7QyiZwaPY7zao+~ScLoV#G(gJpni(mFCXLOM)2Ns zsrcG{)l`(hI$NM#R9e+_Kb~l&npGvNB(=Swc)FHa<&tTmRfWr`6C>-Hdk;^ji>aTL zXZ?KjB$X@*x%YY48Mj zRKODpE;pWtPX|vxz zg6BNagYoGyMrAzr2KWr%35}_o@e}8*Z30<7a=w^5Mf?QE6Id4jPlE29-Y(D+*5m7e 
z@#cfHlmO!=GCOWlz3A9F&i|8OC(u3CuZ#1zdu(;lQ%=i3vz$rlk$~XMZ{7ew@TEMy21JAz|a^>xym?u7hue^P4TzWg0 zjgX7p@m9zs?|3`(GEVHLf`qkOQ~h^ke-NQXw~Uuo9DeDe9dEAQ7QOe5e zc!Gch>mf+egtTiu_zEB_Zb%E_5V5DM3XaK-FKz<3VR!;vdWsMk zjo9mk?R7);x`Fb#0lZ?A)<5YnR8rGl)ihFC+gs7tZ?5XFH(xKV9f{s6C+2U(q1RU* zM`1Q-l2EY4Bw`2z9b97)jd?R50)t_OAxPt3UvrC20rF`z)p zZAy}01yGItNP7fmMhT(D#(L2r4+(8yIi5f(BP=9|9ISt3hR|q%LCvE-T;YrpCdeBETeVB zej%Q~`W*t5nMoYwHZgnW5qY*C8BzPpkon*VLKt8*MDf|i>aM4RC%kiJ%CRuA;8It% z2477MkedSJWoTI%sHpIh+7O79q%=s-Z9-larK^rksuCK@gvq7oc^-(>+>=@f38KS) zY-(avD{xuq&6y2}>9`W`HdLHuM9ZBf+;NeyRg&CF3%VEbX88c&RH9U(8a(9(#oLta)F#LHC`RAtqn)>FMWkTUnbP!JWC^znlVUU zYs(p)u!UYq1)hLARe5_-B1?%#vV|F_;W@hN5P7Apj!0{c&H%GyjY_K#V$}mx2|*&T zEbiw`4++T}(TSN&6N^|WXJ&5FF+G9uOm4zG`On@(F+CmcpRfreLraPAbU3_0x-PI~ zuDR}gzb$XC-bU*gt~!AAhfoTSfu(?6U3qlbisMgeVCVOSyE7(ML#73h*Bs`rXw&)f z%0pPs@TC<8u@Vwq*^0@h1GlEXFI2v|^86cX&mi;T_2nBx+m8>9JNR0o?(A|fuuu4Y zJ~k4$Ps#ekJJ=%eEUSh7r#|RT*C^s0g%!g1)G%pYu+$W!w9_(ssyetb5<_Ez0ZEtk z5JgF3N-bb9!l(!!VjGDOndj9ZGE3m4G-*{g8trvBU0bv!>a(e@0!b@KY7R9v1t==S z$qfYP(KB8NF@!Xgi&!l?t-OQAaVX~0S4h;B2t{t{wb2fj>$0_8qB2om3T;h*)Et_(qZf{sVp+m3>0!f@AVx&2{MZ)3e|@oNNZ;BF1t` zDw3%%V-qks^gwuWqr`}Wj1rL%%kLEdD&gxu68_pUtS|yihpY>9rpa&bLw`&duzANU z)**cCNAOI2!Gr9nT?$KP;)6F(;REu@-{8ZiFve%82S2Ur%bMp-EV}lbOnMWo?ms-b z?1Lk3uRgZygU#;+Zp$gJ_bveDQ_bPU6D%6#kv`TykH#}~@4Y93(;S;$=$LjLOfC+M z&k0B@#3aYu^H)``kyisxh*HpMq6{Wk0$N>Df~3H3)HH!IR9CErNm8I2tUWL`T~^va z{U_!Yk7QaxEY(QXwicayyUkX7(+qXKAt{y@i zbTwC6Nj!Z<&&@5`LTZJnszv@lKq87IDTM%bD#B0~p{b14SBaD06;mTDv7r{GkQ&+P z(fqq2Kxqn#Ptlh(qkgq$aBPzvOJtkSN}ok-Xx$%>o#9FK>(iScaUz2~G6`i?C0J(% zQnprFEI>LGEUCVndV9xb>MJ{dColsYb5rv(lMnTu_>A}oJcd~(l6h&CjL$fR=UvK* z#-OVPe_5fw%my$4#Yf(f0WljieRl6tAL+GVB%$loL$D8v>GeoJuqarXJ?Oa`U#- za)61ZdDmroOK@B&)yc>z{9~;MM7h^ZLC2CrtRYeok%U@PEA`N%j+togTZWqZb!tLt z!55OVJ8$7(RE&%YV|BrbTz{o`jSP#KQ4S7PW2e;waMP8=V!ajY1>Cq0MKORaFcUjH zJ0e(NXx6F7=6C`rKMRhjMF&LHXI0;Oi)=q7Jb@?W#uJ`ZdtiNY%+)_#A;BVtFTWS{ z@&}tx3jgB!n_qfwD^@GUE5HUrW^3y0RBZ4#e(Yu@f^_@*v zO!=jcb|J3(;s^VdtvInJO8w(?SC@Tw;;NSSf~&qOyros)fet zwTWiG#9~-tvDUWgwC1X<8F->$at=K%gJlJn>QI8H(-x;TQHi;M2ArE9MF}lhsIpOX z7V^bvI4MS}E=9x;PEyw3Bm`4h*&41a1cHI2tcfiK>mb%s&UI*_EmTS(5T0xg)|i7? zza3Fhy}!I1?gaF5fV6b=Y2(%W3S@^Ng<_{NBP1aQys@9m5}>pPsB8QvZqfi{8V0FR z7la#%gxVq;$WLkw0NH7<2k1(%#8H^E0Flt5?tbtSSTG29;#plq_7mGp55DAT;U0$I zGnfC>(_GT?+dUZi*?+E&lKvq1g$Ax?)*C%^TTO|Y5FMbGEtSwQ5O!xNZ4JlA8?vDl7v#*@q;ahZPcdHzy!fV>n0Lx`az z)YwYoy37PH4XX^R5uq=SFv6DCtdkb|#aSd+fK*-_5St<|qjkKRCg=7h<*$p$U8kt@ zlOaRDPH1SN`cka-fp+qWvOsi4#G2bOn_prHLKQ5ISrsTNgB?VYlSEpCJVJ<@X?T2J zniVVYLVSST*Tt6syF*53sSddVLzQSKhCEWPj@1jOm+Ju;C zcK40-%Jgup3FJK;LJQx!4#JD$lDsR6!_;Pp&I-|^DpM?0tgyl#fZTz<50Vz5PDEGH zip4aL97FAtiTL%+mY`C>`#-ri`5!Od!DWDkb8&QX7W_nJYv1ln+aY_;fzs>yZMXIo z-^4gz8#!PdK3p<%(t7=D+3*Q#|2}KSq4H}7?fpAV*LD^R?8xsuSbS|?c2#^uqv`ss zobKyKGHl1JeY?#A`)xyq%SVn>+&E;%4*L$+`VN)#A1@y|QrdsaK6u#Hx4*dikfryq zZSZi(z#(hjf#U9ymbT=k!Qz3j_^S5ZnPtaq1Bb0QcI93>Xc^vZ9^7r}-&@ePFSql! zt@m8*@F82z0ZZS$;_G|OLpauMOaErmwIk(2hcc`6Ro!Jh*NZy)j^>q~uI@cnbz@KI z@ZJ)-sGWt^cNO;UFYY~B+JBsl0IDHp^_U1ts{raZX77>KWu3`U08>- z;W4Lk)-i>geQKlpx#9^X>7=Q+A||qA!BtV;xnbvt@U6!MTTY9%o)>MsAlQ0dxaA_= z2)CTW5I?!s%@=VXF}>o57<4$n)^p)IF5w(R+b`1Cb~$*{x#+#pw^r_W`NQq+1f70k z<+0a4I`+o$<8OR;@JDY&ZatVdaBB|Jd}0N{0BbiQ)rh~?VBl52;{ zhY#3?_LlVRDCpdg*M6e3?_gdX61ysfZ^o&9kmVYH+Anb^+8(qms~$+yK%6%??`di$)W~B%b;~+LSET?Ft=vE zx#Ng^czeM$NYg>fr=)vd>A=C#n>z{zc3Xz_m0Ukw zIeN6PO=hVr>m4q?K6;_B{J6FAXyq`p#9r(5T_rcS7Y^?$x_Qui{h+1eL`B!x>h9yE z9eawp_gRMamtNml(z~au?{LN7v68N1x#hOOVIZx^S*#;F{l&%;^w96+8Tf*OdvVE! 
z3NIfn#r*6(L90ko3r?p4{tsU-5VCaWr0&Nb?0Vz9y|2H!=jSW-l++APQZ>i?oC{fu zT%GVJ9*0AoNQVbiJmR{ZhNOL>W6(L`TAT~FX6CVe$7NI9q5S$IWxX&o`>n&fOYZEn zjP0>a?6urDYUw^(+I6OK;E3%SB;|zt`T^^}Zd32>g1$Y4efw<#yK<`3<;|54xBlUa z=9)w1p1ma_nEbF`$Et1~s~CYj13S5$hl% zMOoEZdSg7ZbMQ!Z>8|XCId1AXS3YpMW_VX|CrsGU z(i?jU26q+=?X%q4S=6_$q-#%xOth?>}W|V>?7oQzhz{#fo0 zz8e$WLld2FL$A+tUZ3k4ob4Q#=^mQv8k*|5KJlbpvGc*%u0colfP=l6#nHQlX7JiR zG)<>5>}VOluM4dM4s^2JdRPwj0?~~(R~~x#=R4nBcl5=#!nYkr>={DQyK8EOBZ;Yn z1^7Ns6R$Y779_hj{L82eoXoe+xS=H`U**20dS!fSnJER9F4UB~T%$LvD~%Z7n> zp%Z~2_FFnJj@Y{Q7PSCj1DYJN^dBNvJao_k`#pHp)_cCVCADq1YIIIt-*qUb{D7(X zc-1hDwbugOG@OKSR@kPo zHQKKYl@Hzm>2kEV1&S3~cuxs15HR}fL*_fjEjLcsx{g{KFiw_r9xEL@T7LUb`N%%| z(7y74W7R{a%6d*0)s)}31%3i*2lMw!fhXLo1i~6Dvf=|#fs-aH8(JmJHvJV35IInDOz_Dl}d z{KC|nb7X41XNK^^;evX&h)2r?j#l1+xNSGx-d{F#$aedbrTb7`?UCZPgXXrAWib5x zfab?5Z=ST@*_qdMy8g}~bEC@MVCxwu=A$!7^UQ6|v;t90b#z0hla&F@- zFAP$a2gzzMD3l;AU3)%b-I>%?CyXIivqjpXAXT0pqQb}#Q&t2fT327m47{R`v)6aJ zT(#rQ&C>M1m|TCUC0JFwPMRGkLq=qNpfqD$T#8?8(z+OvN0DS^0Ue?EI1-F;WC4zYp`t!AWvB3I<-52|B zz>#vz%yN)6KQ{}X30H~D?`1xII$iTbzV$Ku1myv+2gCmkJ&xLxTZ^s>c@|NeJ}faa zAhEz-R=QSJg+MH=OO#j^bhW_$T>A23D!)sqnB;-d!qssQ8N}U6FhZq8>(1-q?X=)w z&!P*9A}Z=9Gh;Kq#PUFCbx2&LAl@dH z<_28Wth=B@^b_s#p&39a3yjE;wg#$-g5xtI;*;__x}n{s9rLr(Ghb{x;nvD-=bqsS zJb90oPo~Ve4xl1-L-$xB4NKlkti z*D$`cgvU#I=HeZ+dIuY0^RBKb*P$d+C=-Wu@!8P!fy&x-GFrCRKh72wleO-&;-gav zm<+MP5*U{U!-42PV0=liv}8?up5JAIy1Wtbg&wEtc!n7TO^9n?0jU6Kjvp+PDnCe; z?H7|AEKLuHPg!w64MP?p%?!oLmx)<8VyG++CM+m6PZ*Pix>J+_ScmWIGv*2tG9b2r zQgg7(ibSO_wBA>g`p4viTuE7VTKV1?6$}%y1=htBic~e>x~8>prAUWb6_@UJL9Q>Y zN2Zav|JEjTDg??u9zjp5e|(8wObK<4k(J>jLld(DVv|>2)WVV?2LLOL1KVoU1oJ2O+{YxGoN>aPlG|$hFUkXC=}68(xCkYqXdi~CPhf2E zdZq1B>EP+2YtgEbFnO*(Q;4t)ZHBr^G{;0fV?t^|MSV9?An(ksk4p|qERq;&&|VbJ z17Jd;FA9@qOSJj>3+v8RUE7{%7b>#DRrw-iE^Im4Vj{MRH25f`IXpJCV}^=}<80O8 ziAjwKtqlo!ks?!|DF*tXWuW8jv`&noJVIT%GpqWvy?sY!Db55bW^8>U)T7o4Da{QT znz@k)$@OCn3h6J-Ab;X>#}i(2MwMOc7v4bxaz*Jj+&^Nfixi8y1EEKmJ%L~EC4;;5 zCHsIWEPgU>po6u`7F|0ITz&0>9j|=2_w6-%UwKawx%1S`iNy&NRx<77bSz-_a1sj+ z3iUl>KOci9c>V-s%d#tQ$2;a-=W~li@mirQn-YGJ-iTr|Bzx0xTvef03(po`yI}3v zsIvf4i_rm7V*`>vrWlk$a6-Y_b6VgDD5egFYj<)nWF#ap9|=}edMYzVY0Drl5>3I@ z?D8{JJv(!%AW9Gi0Wu+^CagRZl3+$_PAs$=9h+hvoIvV>vb1))CKIxNsLE|8M z2_5}5s_nw7S!b=CkWG=kFhH3PO+>vV(4$jbE=K+}g0dG3@p(1~)Zn6Pzcvjyp2)jI zHgup82^Lk*5`uV0tNmb3{ppgXL;2;R#7yeqCAU*H5*qm>p_iWpx-VLq%JU~MADoW) zF9n|9)|W)^K8nI?kUfb1Uh1jWVtf6>&*l@PXU{IoG1h^|AGh@FK73(CU_?P_9Sd`~ zOG8QGW?@>qjw(SA&n0jbav!942JyYuj{?1Udv>vHY;nIfKjf-0P@09TIOG+f2q0M1 zBGh)QyK32=UVYKlexRTpG9s3n04XTjKw}fgY)F}e-|K(HD7RMA`W;hq2MxLC>xdjl zY!jjcjxUHzDiNs)gvwmd7-uVc4w@RmWPtBk5;Y75Oj|CTVX?9Zdlp?yGt_ioKh~k! zd$j2iX$HInq(cS3(Url#+eGTJ;P~9VnH8rj&AW1I{StCSN=qc_ykr&B3PIP1c6o`l@g)M-jG~I*@TcOX?YYnd&=wrsd;~X-3eREzWge&Di14{qHP9j z9OY2c<)g^FK$MVTsBQr+0p;s-PErcsljA4Im0XygopIh8o$Q~SSC&=_;g4rJF@CtbD~*OpYfe?mU+1hV46 zPoRiHlv)>Itk{&0TikhjY|+(t<4)976|hd2(nP7S$W%|XN1~!6(Y7U}Hn!oWebFWB z7!fOS#7Z+j2C7J?u{ZTUMulGXHf?Ux#KHvbl4FAK#3JQS5L@KoFrP9%;TCdQ@}%*Fd5ZRus>3>3RQ*o=D}Cf~jGh@(@iBP9tC#u3PB|9Grsl8D5}uGG z>Z7zp(J9sIGdjd6omf9FR9=f!l{cu$tm7{GjO$EEM^H?9gw`5iEDuO12ADuzuvle{ zR2C{~dOGJ^o%6&`geK-<=0o)aON)UR)@!kZasK+`;tP%arg@j@+TAE^p-_Q!bBZL& z5_Aa+i7$*+*)}SQwj^ZacikFxxm4Dg=s3MFF-2l12A=R&SdmaBOt41Dtr2l~)+tx+ zgllJh^~$((NEs5ukZ~5CS}#nh6lv|q5?7bh++1|^ITnu@vqIyNgOo*XJfWyW$Kr5V z{U&v-rga=rnLaozNz4vclt!Ss+fa)G1!_xCIRb^dL7tx1-UBU(YIrhfENu4${^6M3 zorvZxg19$neg5k9+&t#{v%wGd?~OBjEIpCj32G#<=(;=Qx)854Ra8-ogT^AQ?1~@a zD13882}AIvRcz3;_)K=MVtIz16~FCM^YeEn=LW~7n{Uq_G89BAGS{bC*Qb?Bl4&&= ze;Imdw*+DLVmQqG#ExlRf09`0KUTwc9A?=ThRzHX_=Tgl3ai? 
zjdk%X%^3-gGEGX@oh4OB|JC2Wt*65tvAoBLZ2WqUnZh&Xsgy0eAvr$;PDG!1z*h3Xe!?r7A6T z(MEMvdSxs465x_8nzWGEBv2p``bw;r1LOfbAxJD;uPs-1j+>`l$%9kt^(LXB7+ecH ze6bN_2XL^Fpj{+ROx0I4bADoeeu6aYlgr1%)Ptiy>U-V=Wudm$2VYVK%QM%-<-$Sn zta(}?NVJ|Z_f$As)>+rijH*z1ro>nT;N~wYLL~?y5daNAm$lN0x*lBr^|7cJBLE;g zE!3d}qy9o!LhF>uOq)|{nif1()*LG_i*AA~a1w*$AztBQJ}Tmr7!Cwj^X( z2ByGIC~Q?5mBt8N=Eh8`IHe>yvo<=tHd<#7mzg%G&6XKg_FdOLOXE6O&N|4E$`+_B zfj|k2W#CTyPpe~dO5o+7pl+`sDMFqRo@8F1RRxix<#Mz*net8An#^HW#iA>9a28eX z8_+;A5&R`BAWX|4YKn!@+zpATg&n=%C*V^OBlV@i6EJUNtKE~P|vI> z#`@KM@MAS)YhtodPoc2YKulUDXTeVd$K{Llbwn1b%YXo)4Hc-=h|=4YUGqgVuA~7M z>!WW)V>f&rt^wL8L2FK7qA4;V8>Ij!%PJin-DgM>%Z*WKMIw|~r|hl zG8wIs?BF@jMj=dUS#K!Q_s^EOTq!pkB6$uRiEyY6)SD>ms09m0k0@zIc!JST-2?9a$0`KTHdXE*tQ9n}}_NGVoT)^{(ssz;{}r4}RJfwTrF8lXH7 zr+7AjC`(h6Z%Azk*Vl?vRiQFFtv9BqgeJlpiLwgIqesdt+mwY3ldg8BE54$7W0EmA zNf(rv7Lbv%&X^sj&cQi=(f}|-O`5+n%}3Pg-_9;r@Mv-;*EuB)lQFW&OBZGms`M+R98U zCF*KlY2>dSYaZMQ841uinq2unYAu+@J8mpW0?0@2h!i#q`Yl-SL^5v#zV= z@{KygYZ?2cMt3PN5Y|X|59lj}f^v0kumYW0GDTWU#lj#>z97>cRfyga7EH)3x?EL# zTcguiJL@`@+bBu2fj<*#E72YX%7Q2XtmP4JU7uVZl~hNi&Z;u1!KOkobg60*%j?4v z%hxBC>`1C1HJW8IB7DQsfbeT0Yu~?`=e;1-BL1FGtq~|nt0ic6@0x9){#}l4O=Zg{^o`~MR zKD9aO6`}j#3(w*BSOMb%=QCIr`sn+{(FBtJW)^SHxLWTx&t;T_$7O&pKt=*}A3*yP z)=x;JEk)bq2z}uiX=#Nvsfz(fYgHwh9&ww6HPE-Dx8Y5gyd1UG_z-J&;te){AxBH)Xr)VV%M4|bFB3~j)j*OM3)U=GUY&ORnu*AJO{Chg~W2Wz?z!St7O-(y)PCNT&Tv;sx zyEJ(biP<G9g36K1~2gg~3a(fioW+&K!V@iM|zyKpJ zY=g{nETf`+g7Ac zQlWoRkubYVoM{zmb0i7IO>r7?&vj(hG)_5A>BE=Wq{_OGp5H%9i_P%nFesWQxK8mWx8U82cRW>9 z@$4Mze)w|Fm2vAVx4(w;%o*4A?FTn(*fTy!qy&|Rg6;-Ef%#2v=h^jv?!VW+=sbyJ zW|D~j;^f5KsKceLY1xxr6fVn*R-3jY)vVVci>d^PiV~d#fLN^05oprI+6<_VNCP~o z{NR*AaaNfK-h6CEc#KNl*x2E6wYgm93tFPUz5vOU<=jKa{84<}j zB6TJJO_(ApLI?Uc7miUxQi(Xp3~d{cULwghli)~=k(bpe)!kr&A>eXbZDgWGsL~0O zGQ!ewLz44_Nkt+Z@}RTCs%(KQ6**{;%KVL5D{Ar42n(rE(RwRBjFzPxPb)0FcAW}% z*mC7x8a(mr2;_gDGa-MNAjRFu`CHSj&T*HbqnK zPLL*t+plnaChS*4x>b@@i{&Gba2w3E=*AycC2WV_*(_ynV=}WL$P{`FxVSH{lma0g| z*rZB2l$=`JGYEcS%ITT~aqNk6ai8sBI?Z?fdH>7F4p*dwYcqw)lyHS1T4@NECrMP9A`M!K76xB2pentrcM#X# zJ2@*iRcw;x24BfSq>9ob5{jT%!mx5_Y_2G-5Xt0tjg*4eUM7CUX!^^mAfH64gEJI@>GF537Sr zYjDPu)6~0Do+-FwSdZ3KQsg%jAY~^~Q;5kKsmegeK&(y|D3VFy6ncrw$n6)HGZaCb z$+f|LLDi!VQ*vbs>{rO;EG!lZYbA^=z^Yo}_#(XGJ-1&`^5Tu^vS`F95=^jV(F!Xz zj+PaiFKQ|qoVkfMbhqxRD*-+dg~d6tWA~ZG7z$i)Mtgm6_KSt za*-hsAx+z;%G;(ZLUkkhbY}6WKYa;C0@er{xekCTG?5+TE(u@GDyXQ?j)u6T>d+#Ic63ODe%dxg<^< zc10d`IWhclY~Mhth ze%IJgx!I5ykZ|FHb0?RdIujA2*my}3elk&fCL#E2oZyNy^g>M7g?Le{D)6#A@Uk-W zvO;(+Zg;%K)YvgFF^eLin~nu@d-wV5{Pj|mFkU8#lL=xHMKN+wj1pIbA-bpvJ0}l0 zktjSX4?8W5xTM;gkR-eyT_3BtoM){b8o>;^>s*8eM1>nlu-Kh;8^32V-tn1B?I+*i zqvCKrkYZ+z$NI$iZQM?s?QR-{B<)(Fq2 z{Eo=N&gsRMjG{|w@fC&WN`l~0tngy2_;NzTWts3&V&Ij8T;ls`)gq@Ry zU69eZq!3(@hhCQXoxAFP=F%>OK~qxIGv*jwaG^_0+uf-P**QDZDq&oFNL+#-RxZ4v z661=_%W(;Ui`Y+s@T^pHP62@uomEE186#t~xWKd7rjp*l>keu-3p@c&W(iMxa_L&y z^1(a6@WlL_1J&kWyhfa^>g#vTrWNj#r-z+a!YW~eT{ggeiLPofpM_UtkF;O@XOt&0 zY}t)HuwSE&X|(RH>K#6xX^*^;25aRVFk7Mvdc207QS-t7gioSkQX{XXguo8!3X|*l z(A^sLYhY%+{CfY1{8Wi5ZuQl3AD%fIcrGS7M!n&pGU}X6d?6w9d|c3__@Ijr=S0ax znfQ`C>_Vdd`Ivz7S59T+=QMYA-liLb~+ z7iIXm=$s4^2NQ-NIj3A7V_1JhFFKoWIz2bNp&2>Uh%mw*b|VO%;`RsmiTiKy$(&nOfJ6&f!UQUB>DTLsffn zZC6rFC*G&jbsH<%l$JVOSu@_GR(EFBUrVp;POIrKbqrmbaoqxi1!i@2x^41yL0`MP zu3TDEDX*-~YUs|X>&mKXPp@g!Ry7zJn$zl=lByfhnmY~k?Yi1FLrtr(qQTVE-go=% z&9RA_qhnZ?7llHn!;M8jW>LN%gHpY+TctT-#x+Zd2RpG^O=;O|5Ou zZS6DGG^f_KqE{N`1ZFBS2a&pj6$dCKfF+$Q@e{vPyCHvK9u*!O2J?h-7+O!0D&BB{ z%*lgb!V};9r3ZNSHozsb%A3X6+1bg-=}Cv<`n;p*?s!pqZ*FsMTJ1HJtu?9oT5{br zV{JQTAo$g!x`ve6225j!RZ4wZT1QuMOQ)ft+0@*7ZEW=V_{fck;X88^qfS>%&uC%8 zKz2iSdRq@ 
z=mMWTK6YpH20FK*+h)c8&)!>r$#Jab!Y9m3H_6FOZullQd_#Xu?m1?e8N{?WydlUh0DXZ%*@Ovt(dip!!!NA)iu*I3zB6YOV+Myp30R0 z7xN&vo4Y~Z`zNPBZlLENNdt)!uHOZhca)hM#WVlee?>mQPqmRn-{!E6&rX7xO>y4a zZr4?(6K){XR(b^yV*CaQT+MHK3G;$cW5n&o22>h1Jlr*KC?^rRcH5M7>WTFrB1;C5bA`P+413#>FMbyr+e7ty6*O(5`^(w z_mUWToO{FVYt>E6y1eAvn@;W~8o=M?MXUYN%igc&U2G<4RK^Sb#hUD^xMy4<8B$Z$2-L$*MW?iu9VXRDf9JjnyD!d>@adnI`j3z{W;p$W#`Ggl= z33lSoD_%FM1p}CwwogqUt)*+m?Y-r3p%N6|_`(Z>mtwS#(TcI=_I=||)Q0_o)3X?K zV_xqpQj<_i2lRqG52KRo8|D*yIG@wuvQJtkrX900oMX&sB`-mnZ3qI`#a**HF-ND^ z-Eq0b+$0$T$RG+{F)%3Hwt0tmIyN~we7zL>yQ0O+~ zJ#LqZT>Ods_!YH{xLwyKCvQ$nA~zB8xk3c`7UJ@){6wEeAo}ak8)@HWizuHU z?92b5fBHBXzn_;S&|%Q^X_kur#Fw)u+>70Aotc@k+Gl6IWI&C(T%#^8bPS^yFIF;S zMqFNWvvCx!3-T3x7$TpDWRC|gL=3C3X@}Egb=t+q}%Ll0>a;q8H81=Ai19j&MqM4V*)_kc{;W~Qdmh}qfcvFY(K$Lz4%I>zxY zWv2`CxrrIi@RSpi0q8jKqQG#&y8?v{DE}u*e8!9M?iuws$2rm@QVBoe$nbGH zF*axH4%_sM3*2VLK0ZBj+iD%=Jh%lSyl5elcDz_1#Za>gx6F$$FK!&e7OVxwT$`T0 zG(I+FcOYvZQLaOH5j!I$c7gdY^{Jq;Y~JT<=qLEMyvI2;GY*3WdQEhOAIi009NAu;)Xs0r=Ss7mGsuc?5Nd+jj5>#@9Y?78{_c)w9Q~fg~YCW zj(Czh8S6!obA1cnxyEN_hbPC8iUj&^K9M5*CWqk_j5qSk^dy!G!XySjKyftfBAKLS zX25+26jrB)uh4@SPMn3B;omT7v)>w@gqMTEX17mGxo5EAhKqCB@Gi!JF*pGA;LS)2 zlok_M;|o>cDcT&Pvo`!mQ$!;)6t8UL!klv1(M|$4iR9)}JYd2dWbbEu;vR1#JUf9^ z_*O6TlMMVs$%X?_AB!X*Uh3~Ta$l5B*gdu>`wWHv-e2g$7!hFJxMz$k_@rV#x-ll} zB&Qs-8ob-@)sZn+I;W;>GZUlW_8xc@y9q6J+DWW3Mjr+#(sST=bX^>eQE3A?a9c;F zM<(r)=J5Hj}vv_iWn)=HoFb2!rg~rA+vT>B59aU=%;z!hn*y76I%)nc zBEs;J=L7U0!pRVGIWaKZUNT*uv^s8%&y3mJ*qEN4bWe|P)=@|?H%6|DvqJb{h?!*kl@ z!K@ym5!*zBS!eMlL?K=*DEylN8xV_Dv>7i13^yXY5Lri)llVglzDYh2#7bzgRhkW* z#HiBC0IcA`i&UX0fdhDI16BYLkC_i{%BNhR$MaXY>o*lG_(%E%uYnW!FNI7@07HGHe=E3G{G8)Ih<}$g16L%>o?i{ zc}j>y|NJJe_ifZQ|E6empx|CgM^jMW^R4VaJEZWdUnhxIB1RL!ynM+zXkKT&&cLEA zwBP#IkDyCP%vqW)nD`3Ycf)7i!Q?RCE*h&tt`>Y>z5{swnBZIB$L@TyU|^K6@$zkS z(B*qg+A{;|r_la2?99sDCm7U-@L*2#8-Je-A^C*D)l{z$1cJrC_7)cAcDW|1vs+s z`tuKd^fx*rG5-R{O8a zCl(n=qnxZ7)M;eNj?U*ts={LTvSJKt62vLXR)@+|#i zDCkD#9zC}e!JLUgaaTSO^z4I7F+pzI1HT+nV3k{o^9IHsLT~$OIIP{#RJ=<$R;~+v=0-y2}EA z`Fz5!OsAcn_OUq~cn*E1LdrLNr(E2p;rM#;OCa1qL_C;bFV4>A@P7YO@iP*T3BLWl z8=XHA=Lx9596MO|5U6v35EnYZqB8Wy=M#%fN*2g~#pQHRTj$*Mm%g*+9K9gh>)lTG zsZtmBC0T^4+@;ZTU&@P0g}GBS{zX6kRleZWzAOgoH+2(?Pxxjk_XFtscg})yzPxYK ziOAe30qM2^N4ftYIhQ3Y&Y|y7Tk!jfzS)Cc?zBDZ!w~q|2+ZLVR7DXq;}3qF&yiw_ zy~BTe`9=Ek-ADSO!z5U57cA0gRr6*AiwK@Y_=KQ)2+W6lM=g|Yb8ufc9KR7h;lGA( zdx0n6(_Y-?EAMM{5^~S?l+J!o_=E7}?7j6IrzYpKd*v@?}JI(kpooLKiF6-dkjT|FHb(`UOLJ_ZRHHw-E3^_d=WgfJh@Mz2?2Vr z7;eWx!;Poqalmy$!`4&}VbL~mT_T|UK1K8S48Q5uFHC(|MU;1`&<2ioy;zEQE8l+0 zj}Kb9a>1P5Zhc;TVl)&iquy|Q<(3^#cm9w!Xl{Z#CEz^+M!SrGmwOWCgH7Sh)6C-% z(2st5Zk;a#g_22=7tcP%h1WkACqbh$jOm}Pfk0=glbFz3Rdn z`&AB~1KJkFh80P9c)^j*&35DEsieHPG2S5f`>(!1aYvgf7wDPdQ{**q z>zY_({THNYttKp{STn{UDv2Qg!hkLQyvyE)=b1fd^beOzcdXi3vNw-`afWe*PxcfP zpLjBNO#%@@1j3k>KT`V4lM9o-*6%HCJW5lB`2jQp`+tsbIxy_{(t)}qw<3tAi`OYWLZC)=>|BNNL z-zXff2jhPHP#q|}=9m5vLRwoId_2wJA-t8$B2_{3S(Kag5SGc%2*Fr zS$nN4gh7LjgpvxTyxfoszKXf{d1K{0 ziDiQHpTC44hCuBCFk!T`fsht`|9RY_kHju|wc)pU=0x;wUkWS}(2hj@#EHd!J3>eH zd80y)0DJUjvo=`%?Mns+^GQV*N%LdQ-!JXMdT^rbdl>RQ^=<65{I_qII7Af05XQ=s zzh6oSv&8~OnD^i$;_TJ>Z(kp*zeJ%>we;ltXB^CTSA`&TY@xHJM7QIvZA6y0p2sb+6xFaY(AOl|4>&ieK~;j%vtt3EZ~3MU*~~M z(SsdG7mQ9$4(2-w)O*hS)OmyK7oMOFcZJ;lYrt96bOiXn(vZN28Y_sU)cFhOMW>mv zR_aQ}U5Vrw4{toYuM>bH=+K8kTATeJMX1YC^d*+ngjUdxjbz(k0zRoT z-}MZv{@XOfp8`voA(T`Zw)3Je_ud zi4z#MpZE7v_-m4wWKd#VCZf?nd1UFRB4uzqBB{1I1_mm!IfGp~H~5JUm$Rj1p_Sk@ z@g=bfWmXXWwR=LCNgU`8q({~8?!iQeE119cTuk_88KWg&tIUjg95D~Q+?@#c1kbyD zM+2@hcPCiRpV+EASsJV7y6Rge&I6T~-N^(S1>TwZ+&wv~(^~xcQdrcY4mo=|*Bj+7(B;NDvp$BMYP8 
zAg)%#5lef|i6@YMvEvCvlPjKJpgW5vexVcgW7&+WUXRh-Ii>z~9u@STy)?)UEEpaY zm-{I*^>V`8^=P!Z?Rko>HtMWqA19u)@Ys9s3(x4!J)$=5J+yh>u`LH;h++$sBW}l0 zLIMs0H%8E2pehyC-3KBx$c*?#W}^TE)gKJDpi>?tJ~Xhv@QEvQJDTYglgD^!G3`b6 zhNw1sT);XZ9}^zFIyyskkECx1Iq~t}33B0OybQ_J4Y%oYON%qs#S!b=2(>h5HVrH- z0|X2&Qv+tI-$L~-Q!kdO=Vt1Kg$%s&5--fuQw#OjLOsM-w)IoU{^It3ZhmQUiduNS zJoR$ff@(2b-N1Q1#qExjdTb-2@ke-JC2T6LH7{(`^JNN)4Wd7?$qbZ$34YoNWMiIM zUK(DuyfiQ47H5E7_^Fv1pnEzGV1KOC6N-XCH=24qfRicKk6~Hvx0zny5Ue01te{V3 zt@Gp7*%8X}&@%IoTEa@6W6zhVmp1AN4#!5lw32}Xd}XnZ0V@GLLo^1OvDsj51}zB5 zhCmQzf#%eXCssp>kK~v47Tn;_2GU1BkOiHTTIu$=y229~rg4(3Up!KiTMnE9hS&s21n2G^vFt#Y-s?~%Ok$W?t$_Ff z)M^C)(?{ncN6GvK8R8IBA_kM1q;=Nh7m5LF0r7P}Tq%}^jQUgeQ`=BVy6>RWP|b7; zqitXa+FO@;TW6YVEuLWV9f0p7-(l>KB$YtY=r3!BNfhy-w&MxR`mH1#plbs^0X)Hh zwIvNH(4hk)1a9ResU>MmkMV0VJ|X=(h~GA34cb1&&x&vfq$ab2@v}lz1xNzA0jV8N z5TJne9P;vnjH>%4y1!`6V}5djK>1jpcm_N{dWfh1JDO;rDA^Nw5U)(scOW|hzCjEk z1ZLsNMv<&Cxwa1^N7?Wk@PtTOLsA=>R*tR(!~=2}Bwyq3|E5i`uzMY@`~=B&C|CFi z;vLB1iI3z;^Na50TYv3L(i+!p*D?X*&dCLiF~D)*EpSnXsTA%#0WV(9Q}guXmcQ=6!3z&N493uA(=I9^kFI;ZwzPR-4#j*5YCq_2?!UZSkeEpxi| z=JPqFSS(gs5v{4hIF-{JmsK5`SrMIH7MEF(m|J~1uQoBOGDcf?GNbH#cG>y#qO7*Q zhcgR4Y3bXMuHtV*<9 zj>%|@ORvW+ozUkcS2UJAcn)jyDUca93iv?T(t5HG3m3-cRmAGc<24nh(i=`@)yHR5 z#^;nD&n?3^kyDnKZ9I`vdMX$9XL*T+?5ytINAn94HVg7BmgNQ0(k${U&O3M~p7?n3 z9rXP%5r`0h3xE+3k-!uLj3BL4Aun9jlr(~8#@GVY`*c#5-xPIP?kPI8FW|&C-tk|0 z#C_=%|G7KSP(u{~j_{Lj6Y$f3qs$i1%U=ZCaSK)fKzJR>f?*vphUmxxJp`BrU^&J2c94({|be{_igC|-rANy;X z0`x5$1A!DYo*<j@#D>kBaS`lBltCTtf!D>?&0iY$mI666Q* z#{^5!ERt0LPu#N*ej=szF+UZi-H-Ty6w3Rw-!{P$jI|~0Zpj#2G95tZ-e9l*2WSAE zke4Ud_5e>9M`jM`N`-Rt0citVAV^3{2IKfi=nCjIbR9f>H|?%LoH~h2#6Zeh7(DTG zf$_6q@x(eho7dg0yv0#q4Ij%i4I)(lL=?>o#%K>115$3t=NJW@YjWiE4As>6Dm}mT z)J458?2;$<#(;@+EiDBej)>6=i~32Jzj@kkkUCW`mNe zf|BZjlk0<1DuYrB0#owDDaB#viF_yjkg`nG`Rv}jb!5phwrr^$cy&6z=wMn_n6x-l zUKJ>>LG$7;Rh?L76v+w&Y5C%`f)HhCxTZWvTExAT2`zePTA|>I>U?%p^^3753s&$V zZ9}gWE!|Ouf}pg_!Y9)wW7y5&@SBAP@8kt1=ZK}*LS-ROY2>P^iJ6bAJ}|8` z;IihJqC{SM|G}8Gf0pW>r&>qm&X+cZ8j5(TLb0|yP+BgzTNajDe+ai?iXwr!K&;LW z(iR443MA@$JO}HFf;IWPyQ&K%HN{U}+=HhkTsG)dwGP)sTF2<9iu}m%1S1I~wv})3 zm?8pKFc21MVvZV|T817%Q`B(e^li9YZ{HWPZD06~1IM-1d-YRgNPM+9L)+yD0V-Z1#)6%z#SBn;#a)WEv%@vmtc81DMQpDL~Khg z6zkFt#?A=7E#G09qio0<(s-gkqG=9LVumJo0+TM2>>ybQI$sc=ZR8lpL{17m^`zAW zZuE(PL~2QDvz?y^R+P|qqSF;mKtYLVJ3!YSpe2e5CV>KWfuI(8D@Zi(D2JUCy`c3E#RuV%z>BFu>U15f3LT)TVCoI0I4V zM$bzdJgK(@{M`Zxa9(erTguJEm2@^7W3qvZ!} z*|yHa6AXdv*eh2&VgHsL4rAE=sU`EUZ8@+1`Gv+NOs)cHNwB&;P)(AlVntbyx;jYN z5G3memh}Y5yMq+15>=I0RW6oS2df%+$yJiHinIAGwF5JQi?%_N)zIAs91tci4pCHy z6xGPN^3!SpQ8BNq4pdc1l$C*s%3xJ>n5IFZsOBVGvm9_lqIWg)g2-LeLh|;3` z(cq2hws38sP+AbIt_y)fPEuX?-P+JwWf7@m;j)q-O{rK{fr}b$Wr*dlrZ-sK?sKg; z=x*f+O-p&t^s`Co`8<`~IS`>O=W8nhbT#nL5h@#mDfPigZ6UHYfwF?DE*5G_MLHv% z1!^N7#H^v3t1k6U%8byL-l^?u9wj}^=19wl6)~p0L*|Nx@fZHx{J70A`Fk3KJ8G6h@if5H~em{0Z{gZ1?` zpL1Vuu7A$Gy)z)`OTOGKEXzNt1Stu?1<-qR=bd680}u=#8=!-REC~rj|KlYXK5>vJ z<+?{^yOwYxwE+z#AUm4>Aqxq|eYn^`^RzEa`L#F&o)eqAufc%dD>@s9AB_M%-5m|r zii8U?Xl)3ZhqRYI#Tl2{_38)nPCNm5EARyPiJPTOqPy85Rjr=_>d1}&4P!k?HwL4N zMFqM>%;G#l6Qq<(vrs51$O6g+_*tbkKce#;!@v`yxfdjyzz_!HJBZK$VhivYG(7+n z1sZ$e8Ah2selX;Q#zNZ}plL&6@tCZt&SjEJyiwonCoA`Z0#u763u-_}IRH#U>{0*+c-qC~U>4kiUFwF=&LG%+NlZW(0LU|<)fx#0r8$r@EB7K+0&?wNO zg?b%uk{vLqnh0vq15VY86%9vKRWzPJx{)phw&Mv#^t0-Rz~YG?cU}1CKedLLSdnKk zMSM;jG=0NC?m2M5aL$LWZ_<%)ew>k-F$rL&u9H!B;+Z5SEYzdHrP7*vcU47+mlVNK zmyjI!+UxL!uO**-9fD%E4SS-9SEdKahJ5CE{{M43b>OHT{Y2ctwP-oE)$7XdzCE!O z>5{#1n|DXJ`NdqmZMgS@@DsEcVIAuH-;O8j)pQO#0V(46{8GQgBJ1rsTv9C78^wkO zj-t+ARwL3@i#3%ZMFq;oMQN>~)DCf4he+Otra?SKp-@#V(KH6CNH4*#wEXOz=K~A2 
zAv1L$w^Wj>4Obh(4Ru0Ig+N>Fms-jp1#D#!IT<2psYqHTkyQ!ODgy2p1=3oPw2phT zC?vTgQCm~~Xllqt<#awy%&!7xz*m+?H1&eiI!RJ}L|XH~SxxAo#FbX@lWGF*G=-%# z2PrEgnv!6xF+^7u1SFv;!>)?b8^jrnTtyjhOq`*t>iKKni7C{0Fz}Y~$zVEef0THF zCMW1$1l%fzCYK+)oULenCd;Tln2_>~Pt?X;A)9uG!YXQu+fn#KpzF<6uTyY|_+)?F zr*83Zm->Q#{VU;Zc-P`b0gw$t2=4J}|F|lzNYrB&qdzck1M-7Jf#lFQ`_i>k=o^gp zK~4ZnAm8B|Ux)=rS7F%0V9Ce=_rqCk;dd>fehmGI_Pzl$3@&ulnw3Z{0Xld$K7 zF9VakqSH6|T-kKsEI5)~KF8Yo2FblDU8clbB4{%h*yuqsY8OAQr^}`Nmg8M|Ojt); zFRtU?GVm3ZK4~=p$~KOQ?q{KD5yOlDdIC)3x+bx{S)AS?G5}kUB2S`@M7DzVvr21z zK=8!Vp%6u}K!w(zXsFSH-dec(Cpi&aCmM>ZGON79 zN|Z_0>)U)}C4Pn`A8nJLn*7Q{s&?!Nc0;0T2-G)-^z{H1fH<^|C(%QY0puZX6s44= zRJW7*j*-bDx+0`M07RHAlCl(ikHm04D7`y4y(!R8gWnyTyrI)1WvdEosDW5#lKYw3 zBg*opix6wS4Nt88YFRw7o=)|3vM;N6c^4S*KJU96f>VypTsoO&TwsW41Z)#%c=38> zxufq@QALMbU!HJYB|duHU36~ao+A)>kvb8NGq{9#L}>Q~%Ammnqr1)X42s=hhOuRL z7?yA98kj{vgJb>cuDIjf{Di$v8+{Xp`)(*vW-Yb>tMy7l6-TWKkmrQuqRhP>zi^+F zT#>R^jFD;--K`PbsTC*HqCQ@fmc>moaODMJP4&Ty)(CAq|EfaO&4)gDx4 zA5Jd|(iHPGIpWMBkv?Cb%nDKz1gGUOLz0}!yPdXyU0meADl$h4xW zXVbV*(>8Q*Z%KukT%M{ZEVC&P=D|sop?Au|ZWe~!&In7%2vQh?%5=UepRX#z#gr#+ zz=bt1y%UKK(X~8zS?>g7l~(GwDpQzd2+XeHWR&@;3Ia7oNotYsdeNbjiqPb&@U-*@ zS!QT*y7;z1a66N8J6o)%2+e2?(bS8Q3qo(D7xy4-LR(On30L+uL#=hlOTFvIOml(s z8H=){jQqNYxa%JL*p0h_e)nb2roBhEy2p~fHtxqay2YbT3e^*s(MgdM=LVc)_l9U- zABTyb6Hf}T8JjrT;)Up!iN+{8&jVigamC*8CBP6)X)}>nl6*%Vs0+Frv={}S$S1lT z&e#`W*b}DxHc0i2P)0}y_w%3ZOWfj#YSNf3UdO-Soc+^XphJ$sy$V&PpZQ$)Dlln- z=ef`J#%Abb>ev*#+>D73`YBA#{XoY?P;$;V(Fa{{= zILa2^v?htZHzd8AfOuIQ$~A$l#h7R{b-`IsGt_&hRB&bWNFSi{XRx6Ya0CnlfJk6w2S-(lzG6^!`l(ttYG@=OgJ{O567s6+ zrWc<wvn>Gqnn}8(e8iAbhpu^#WN1NJ{)L1EgiR_YmpZK{oiLHhSKz z76SdKT6wn$Qmfj4kV;+-1tshFDMb)_`pX-bW*-1a#MYA_k1~m_BG^#NksAZ#wMdje zMhZNEJP21^19tO}qV&1RHjN~g zp)=fX8TPlFYbCq|S_v#svu5hmBvs!#c~ez~@{o=DB0u*%x5Mkw7rxiYmdrnj;KnHXtKh!nIVP}o}xHdTaV_*qHC(m zw);qPj@hW->nV~HL%6mW?1IRU6;V_elAX(wsX`U$N7Q+dntaUSlDoQ~l&o+?Ntmh% zQgETP5(0||MR~l+SpRBn*hZxoYsIPZkgUR>jIzM=3Zz{iUq6^q5+yH+R%AzMGL9P3 z52j~?W#omW7l-RBgSE8*>S}Rjt5{LTyPB0&+D`BUMa3(W!AgB_u92h9^;2a=WEMrJ z@`7(=M@frg=-Z^`%W9n+h*p-g4 z>)lh=yC*NT4qa*=zR)&)zJ2my`_z?=$;)k1S6hbCS{@fYA1Zq>kk-zvnrDf=D?@02!*H>BxuqT%~ zX0f(Q9g~-OCNE%DTSik`Up-s0I`9NQ1>IKC&JMEqj`cU^teZVz5CGHCm#)hD4nEob z%XFqj7*VEELA@fSe&gsEsDXxlc0Xov38J?LyT|EeN?+XjyD!9>_8kU**s?G23wPM^ zB0=9Q&p$Wk*pUo$pl93M9f z&4y(*1gYx4SO~AGW#!Eyi`1xvI+BucP+5FXTM?)&j;*NGj0_hoEap#4R?RF_jn7t$ zjF%0KSB_3qjW3i>T8qc2xS}?Zyo7f*mv>Vik!+}bH9k&J>iS0h-4w1;CpP3q+ktnjzKL42z2) zYB^4kCQ6Zt^*KCUR#bU)_V8%U^a4C~nkTJwBjXLDBN+7~qwO;bHDhx{LkpMc?u!+r z{Io*O?aZJ%dgFsvL<@$~WpU==F9}9gjXQyP+y8ZXURyuKOk+5fFc~*U3NM8$%eBZx5oh3YQ6T787;eo9-_w zXZ||+J-!1^;1J-VHat6zLTE)}@A3TlleG_HtDYXO9z9+?7F#+PTk_&e<-m#3r%^fG zQ8`_S#z!YA9>tYCj463=s^(=v`Ln~heWxm(o-Jutwf1$6P8c6PJ&|8`EWhoz@#)Fx zfrOgpF%^%Gmpx7_f0AIlA8YK5Dea3X?mAxDaiY87O&cfR8^R*vD3wK|=Y^*i31ykl1vQB!)kicL=kkoG=e(Fvc`38z zQg-FVJmbZ}f>T8$F~tqODJ7A44-YDvf^KGIv^++>V_=awnwB4-sR`3H^5msqxn&pI zd(PCggy}Nlataf33eOiCuNuow=M+S$)1%T0&y=?YDvNn(c@gQ=QCT%84J~^ygdA{M zbxnjeJvg%VnEvOdi zs{*rIiP++1c4|c{k|u-Ha-v2Np~;NMDMQXBET{NNWApj4s$2BxLAv1_$wECR^hRHBlhBD{i~FT59v3#do!hei%+`IUb{sehjK0(D z6fADx7PG@0cDDz&ejC2qD>CHh%{$W4!iw&mCu5HXr%Cc$@xCFGR4wdi7ZDVEfh(MC26HWGPqcxhO795 zy5tdpkDO!EJ;ANKKvfgckpW3nf$DCNvW=foE0I+TlZ&`_a(u64a+6DgP&leY3w2_e z2_*+lR?AH`3T~vQ8Jn?^>K7x2QuV0e0+AUYt@M-EU}1q0g{dv#v$iAsd)$1Q{;a!ZnNWMmA*S zFr2GmZd^#q3C8!CAQjSR!}&gKI87Id;NA9nqnp0wf9)OlrC036-3Pz%IJ$FB@Zp%- zeNW-iNm9-vKZ28)CRCZVIf2f4Y#HF7f1Qb=RaanISAwhZ{DOIQ(G07~o8>j33VoP1 zH&B&}vdL@hkMu7ml=u5H9t~&RAIk0>&gmP;zBiQp@Kwg+7y73|Ne?H3GdmAwKjGc2 
z@w=c`H9P_(@NAAcp5AmQy(uR1WR{Bp-L<>K{MXJbt2l`b7Esh30v`?#ZCy;c#$PQ(zVvDz*skmZnzs!AA>nu(KICA&UG! zeI-w0j54<9UcAogACq?WD|!Z{Jx?;8zsh|zsCn>6_vo3t?}?^=GPiy`DqV2x#2p8Lr01bxO@NzWPlsAerx>-~wn9Jogu+w5@y5=(eY@e&DcfJV|Y zs>CC*kS@^3hy4Rxx`|{5yeWx=C0hHUM;C0Ve4{dbA`M-btEx)>z>fpCTaU!1Wj-{9xJcb$K{Rr z5z>!U`U$!L5bj4NOw=ngb*-qLe_O{_7I9S7kWVraNQt_YbGIT`S$D4D*{SkJP&A{u zD%4ObQdjYnRY3-*Bby{pU*5@(HuOS2QSs>2AyrX`wpt*qhD#=2SuIr8gzKAy(h3lP zU;@rmKaR<7lSm5!WrcxK2$)buT^TH`2A)7=_*p{*@B}cuqOP5PCtISe3o|sK;1fa( zjM1D#M*vC`Gu2aTdUTO)th!HCbl&zw8Rofh#-W&)K_Z@?>qJ7jxRXra*BGm_{ z6@(cY#M(MPc?I+qLHZ7fqAgI?5SQO}sGE=mX*oZ1nF!vxqxhqaoza1x;mdT{zz((N|T z5rQ;`vNr`KU9c!hnO?d18_^dY$Nv25kX?QWg77ni<()*ri5Uf5tLRJ#UEfZ6Uf6jQ zx|DyNhV@-|g3+eZk;WC-v}9c}SwSsaFKrYiX9#7`I^+Zy$}hLS&Rk_Y)t#~b>_?Q>{pZe7t``5Q%8$B0u|Ag}8n5ut5{c2M8d`j_jCOErI zoYBdbw+NFeU={oXZkQBxE~hk1T>&{daETlQFzpzDVi%mqAO=mM~SQt`eu!Nu>374oWeeG?c^87CeWnu_fTPHb9;qlu;?s zS3>>?4^40e{&#aO);y?Rrpo52u=F~grrS71(d}XabTgTVeanx0) zGKE@;#S`o8ME*$j$B7Ky4iT6ek(Jw#w+SZR02wapbU0Zs(Qt4W^a99tpb1&XkqaBY z5pUUh$RqgbWm)AkU6+q4Q|CPsmZf!z+HqOwUTIjZNO8~0P!3JmokRU=>4;IF7fp*bJ|s0g7nU!o`q zR8^qp6m?=~nJ6}(cTqj$kQt9B0R@0gV1yNv-V50|{^}2>3haiA7*~WSs$hu$rkATi zv!W^C584 z5CrTqwlQ@3Qai;!&RA4_V%Lz9!7q=PUP3E9FlO@+ChXXEbgzG6UFQ(vuFc>EBA;+H z{zem8S_-x1bYC}DzjxpX#x4U^zbHq#Wo#AP(c>)TYN|P^R@93I$ePeb3zZ$vH{^~| z`C|l6NYrJ(6Lc|lEtZ5!giuu}O36)YcmPpn>HYo)CGrck!J3xPjBbvi1~nbw=`Fkz zW2ml5{&XgDluEfji|Prn0tt~;fuda~Z$~Zzw*lz85oa*sx6_aMj@AaALMzNF3N9?= z8VW*k+k!G%1j;gjtl(npy`l-5?&ZS4Ot@Us2B_-&bJ=JKR0!qe zN7W@xJV9C(+xdyNTyEL=j&*aQuag~d-f*}Ai?{C^Ga0VrL|pE;-&Peu-^Va(MRtP- zC>ffRanqx{BYN(?+VX|>9-r`wck{bnl4en8&drpABF8~HRx#FW^i4f|{R6gO#F*qy z>kv@9dHr%;YMD^e$AJUG&lGs#PW5Am8F4F*d6%PW@YmJD`3}Z1ATx1?B*v7lZXiE( zZ3EgI^D^4G=`F;ePt%F(9n_2Rjz?&;(X(g;7mE88;=h~G$4y6{z(#NeF!o0sJgo7E z?VYY6NZ){3Q~36P;iM5mGrak@@@kZO6udG42c*_Ll1LjRy8C|8R*?Z#!Nf?1l;Fe4 zSBYlFq&p(Y+c1D#@J~|Ru5JP23RhrwTHLC+2T+3boXV|5FCv~2-hX6Igt{6rTrsqy z(j5o`etg(@*yg~PD_n<5YBT9F0NXxT_ekp_a zQcb5i*v;?|^k6!m z)2y}A0**Nw>7Nwcj2^8*(X|a~C3s}mKX@ee%8qY?PF>NH%q6Uim~)%?Av^HIN|~qw zNw1R&+X{|feQ{!-4=yK*i`2;U^1u?6RMw8VC$YMiKz2o~*npN`%}8h9P92$rKvkLN z4IN6jLo%v@)8PE_Dd~~ z$moD{8l(jhB_J~bl1u64sRMVzkAye!>Fu$>0gv*Xk|zZ!DfzUuXMAxHoH(BR~EKa37>l>Q0=#)Y|a?G%IuM zbG6Z0hiKu)9IYKvIXg@;*JK-VlMFu*tgH=!IUU_H7;`h+Z-}G5x|1X?)JTq^Lm-Vd zFnQ~l_J*CrCr8!ne>)dmzsOTOHd9BzZ76CnG2^c=$sv7>)uam|G2_z{i+(@>rne!U zX$xUu(2F(Wb_5lqMg6m|)l03vAEc;<>m^UoB~bNX>pWZ;bxg+r($Ro6#(^gfap%Qf z(b*X;nb=jXs+p5qbf>l($&QCM@Dq@I)d`ivW(QCPJCD+K(y1BU1Yj@>13~b~>JD^F zz)=vtVS3Hj1oUEIX%)stgFu$Jj4|o1^_$!J{ZqVS!3_%z) zJ3(7(yB@ad)!_TX@q1Xp0ewjIu~SV!I8VyFM9nNvusX@>dKsxI5T<5^qDER*15YX_ zBtS*LDjpz1sLU5Aazxq!2^=Ri`66vmcy4V(egh0%d8t{kn!KWWFP__|rqRWV`Hexz z`5|fLA@W-IyTFu1EF+%u^niU_4F=u`(R6T9n*47!1S`8j8NMQur_AB0b0r4&#gzj_gyqy6EounV7Q&q7 zn6@ChwGaAS2+HtTT_4tfh3$BP?)pQ| z=wvwY1eg!ldmmI3mAss$@kAekCj{~yk+Mgmf;Nrd31aSxeul&+80duH2@KLlQPm^V zVnYm`C`hVp$4b!1^mJ}3WaI=-p#1?l4{1oR0dgR~A~c>LSVz+ZNC9a&aoV99Fyg31 z(zdX)D&wPmC{+obVB{w2Rx9%!XQ)$$gvktG0@HQJlko$><7jF_Twh^gz3=Sr_MH55 zZ{mi1@f-HX0YDI6ihZQz&4yj!|8JXM+n$4vT*RDJUQa8?E^cq==zlt}IJLlZH=!1w znWKL;c*d>dd+gEM7jgaVIbRPA4HXm=(G{;WRp@xQd?Rm4uYU9{e3%cu15W@rEzR5J z=z5l#r>{?EmcU+CkYYfy8=ciwML3gXe?v$R=)fpd-LC6e9Q8Dx?GF>fFrv_&0cv`hkstHx)hHLTz6(&) z0k`HJkHr%o=B#Dc`lBLXGSQh=LTvx|OW}{&@-O(IAC4#J8JEQqzXm+PoM#NjLLh~W znl@AZK_|EGKLX-mJFK-2Ad3-?b~ZaaqqZM7ylHpv<~<-GB7>rCUP~*=DerCW8@<;* zJ3LJ^l7!)~k>RQ?Vm9$sK=A%&IRg=#&!jg8{a}8f4A6Jtl{bL@I%X;836LRTR`C<) z53oedV^CDz_`I~bC03soCeIAGtl(VJ2yg1dclCiuTHzgq=#EU3Bo*9(Uz}WgS1wLc zVbbN^kOd~`&*qlZJb7_{bacpU>o-$%FUD@=*BrW~7hh5ZUQHL@$XprM(#7-;Ue!u& zW{R(82i+A 
z*^x3MEfh$q+HUa5B%oa%ES`A(pBTI32aJF#6|uNzV(x)RY0|In@k8(gNh{Dd@hqPB zmGBev6&UhiW>&)m8nTA)$cx*)4cxKs&=($2pSwq*2iO9J3vhC2?vh>08F-#pNZ&6@a9-ak>AQC>$q@aP0y}J1&!>csKvCzVU<|Qa~r- z@arSr#Pa;?!sOKaEUGHZRO8@i!Q+?N_g`dmWAtbB_UGK|&wcPR=l=7Y`%iNpKFN9T zIPcNZtiH#1r{M8`q3e;p<8kfF(SiB-;kl`i`PtVN%b&^N_)?=@ONgR&;Mw$R=0pr0h+SYnFQP+i;c^6Ah5jNKqcX6iXnXfaP; z(49O4Q7GF;H(y5oh(9h{0W+NS*!GLSk4_mfh=jpC%)`M|tVQ|nhA#}NSZ5E1N|c!# z9T6q^?!|?N+%TN5O^b^i%2&UedDT{bHF!A)&6pC0@Oy5>ZDH-t~QwfBYOm>4p$_%|EUJ~~YO%Cco~9vp=kCMU}yrln!q z^03u5zD$iSS_fxM!waU@CNuECxWzKDy!_f^nzULWQJl2eCM?vLiHsSPe9w%{%?(e_ z4o}UFO)pGMFV2rIE{vN@xWC81@)+c`&4z)Bi3!vE9A$wtfxHjDK45}H%gFTX`1B0E z==sID>4h0s^vu|nhZYxsOQuY=*E35PsQU%n1Y(#p+u$}fVX|ViC>O#4lRz{kQsO(i zU|E`7T$-Doou3<W@>!NHojoR^O)H>X0eS}tV1T#=n}y|GWkJ6b0iDPeTfH^kBFLQvPA@Z%3}*#Ey`e_h3!vw6}J!56%)# zyNNBN_isyfxX%4Q^i&6)VAiQC2KQK%C8ngu?jD0{rr4=N3#0lOPlQ^kEicS1TIT0%OG{v1aPnZaBs+#r zLAPtNEtza4GeHRWCw$^2%uBBq=V^}1h7X{ud6u%w(}shyrbY7g<8;MVi9P0ugBgn_ zezr4{UD3}Q0dnPBn~(c>x3E6e#^QCj_BlKKAiqLI34re2g}Okf~E*NgJCJ>Oj~ zDpc3LSuZL?q3P;+QJ%Kx7poTymsT_NqU4hOYp55s&rFVkv7ZG_VBnblNoIo{PLqU> z|IA+xN3a%8*kPCp(_nZi-YbQ@b?>s;ZGh8o9(U^k?6nT%Gz19MNgjteDzuAuzF>sdYbuKw4X<{7S& zx+|V=dZDcL+WhV*d=*vn#v0yO(^`nL`ns_S!>s=7+av1SHz%T6Ie5ZjIQ>ptDY3UB z6+2nsVy^1gB#S40;nSJj+;2VtES~rw`Ho)`KjFX=Oh>&{wGV6giIofanwwPDyJx2u zW97w%|8=cPmF4>%_FJd?H6OSCLS2c3HBjT-+j|d4v1*cb`3LjTsl9bvCsu9Rxj4fd zI2U$(@jZvQ=Fe}sakw%rjsso&p_P=|nn194;{DG=c1bn@KSu;uJi+3L@5K{N{@kwQ zTSK)WZzv~8^-2T56PkdK!*Ur@Mz!s~pgmogC3mN;=2)seb?VOng6;E(U*P^%h zZL|4~pX(fU@Dq-{b#L68t~xbV?vmGni8nIqzhqkEm)zFR75J6)yd}aeZ>^qczlREX z7Zb6fIUu0qE%Fy9Xs~bjZ5M=9n8UvNWPje==o&WRyU2;vD}C4gGka~{bH~1|{-`U` zV)4Y!f0nXqWFzq25n%DeZcYhM5N#S?t&c*5a{$;`v6 z5yac>!WrV{EvcpN#}lrXgq2x(Wu|_M+VO3Y8pmqh@r}EpmepHv9^#w$;B80G?ASZw z;}WWOo&zjD@!rz_yC55ZpCtn85>KFtgykoGmaq4HSAx^PRT)R?`QLdpf8(5a=lu4* zoBz?vy$No9^s0X857MebVxd4=GSLUZd<%|g82@*q66~^Brc89zj=qgLpcil=kGJ8! zRnpou7{zhBZ?EW~_w5a2K)<=VVB~#Z&Kjn{1*AHYWh(%OE}dQR3}tr1<=k_5>Cg+W z-cJ@!{L<$%yS?9f1m1=x;8z1fn+ep5R(1u;%Y&2+0(m2kZdndnC%OqIX*iDZQPK{S z?k|cKo%Uws=s~JQr|(9(U-?t`=03Dg$Mn^tUpeXL*}>J4{-R_JTDl{s4y{3%{-X94 zqi9x+e{$4KOuzC_Wl6!ynWq$$)NnsgStHbRF#XEWUld)`CHNG&lWWlhw2fnE)eF7yYRFl{?OT<{bE;cw)_+=v$|g5B}@=)!~WN z^Cit0tN|0N@q}}RUK?sAAK|>eXCRB?ncxWrh@roDm+AZ+cw#N2Z~&n9ff1{5z*_XV zf_@xm$sU9^@Prdm&izHx~Iizhz#?~mO88v)0!+j*^FE)B#{f%w?aSDG2i z67a;$iiSXGPN2dllvi>Tb$*HlKeX`HbpSdD4IQHNHlD6QnBK@W)Qhs(=!v`uZwhqH zVr`x8?X2XMN5B)EbLL|iHDX{6RlOj+Q=I+4U*F>g_wMvwe|;Axy~jt<6qwb6S)8Xv z^YJ#OML192F3@9YuL)F_8eY!aqo`|*eZhv>0IWpU86aNYk3(np6 zzE8e!UxW{^c;fq_#{Qj+z)z0A_u>h-=*`;9lN5EmyjGB`oSYEgOvp85KE01F>^rH{12S60bWmLIsRx>etU21O68RIt2=D<|*! 
zsu}`x?PM_b{f17=*HC~6rgDImDlqz%`0jHq-CLo%Jlx(E{eL-{#c|b z^H)@Yy6{Y@s2hS6SyXj*8(2-s++v!jUru-MBm9(l=0QYVqJy5qQqNT>@6$v zQ`dVd>H<`aJVlK#x#VCoIgujqUno!^{L{^VTa>&TLqF(pGM%f`->y*^p=BC+tnrN);RI|t(H zA1BnmNUR??+c18rYW#S`=<&+IvyH>2jNP)D?uP!+rjfC0#@Z`QPtP2l21skp zykNC0v3P?0Ww8-hw-I3R#Jc@z*d6=zMBwdsg62PgCy?xzw^?7$&kdPvS@)h_sp-Dk zJ$&ch^wr+^bL}(d+UBlwo36Fb-R_u6?wG#aI(oBtIJs~7X3x~+_OV-i^Edhy&(x13 z_01-=52n=h=63YtcRxsOeRijN?9RQ}TYa%s z`^K-g4c+XVxY<2@yLbLh-vVBzbicma_*mcbyk>Ax-P(7tqVIa!NZRA&EA6wFS{AOi zm~S^P+-#UiY93GPnZDCIeiJL{n?Bz-6 zhZ`C=$W3H)LW#+f)rM*tB}oOUwr5z$gE`9)RdJxAJWyRN(bn;m)fi-j%2J7{SRl(4 zNOJ;i>jPzlp{nvQZC#M6RxGa$RMv`R<)O0TXmzpi+3P`yGBkG|lH~`dU3q2EmqqUmR2T7Es-cIxw3Lhgr>G3p)`sZH#ftoc>BfYj#&B(^AUQ{zRs;;e zOECtk>wzbN<&`0+g-26!s-C~b6t1jpj8YedD2hY$HC$CSDJ@Yq2P&Gw6^*=W`LXG( zp^6G&8lpFzWE@5iJQEg3ZW=)v9SddgGQPqev4G?w!PXy-niVTfHS#g*wr{r-zaLJT)VZk!T z;)xG_va%asBk*G*z~YG?8#(rKYy{pX0<>!S?RbKI7gbl_DV8kM#H?x9L|rdv2u{fl z)7AuKwg|F1{S6%f8dOep1glyjRL#W$RLLN9rRpg^rAVx)49sZqPN@h???fp?u)YaI zLTb~)XJ+cDg^JXc^V3TC(sGfimLmh6s0!BBi&fIl}fsUA#~zP76GPlhNe!gNgnCG?@BWE2WezNS{JC=HWm7d?CdJh4EV2e5eJ zeNH-d88!mHmk6+U;`b7EcB8+U2so;$*474+e8&pVFk7jqdCQoYx>MX1oSX-0L8z-2 zW_NKjyE*y}RCNlZ^+$9qsJZAs*+tuke{wEQV-%#Zz#qR#u2|8pT<4KDTwZO6rFx>ZO%BuFa7oXA5Ly;hAlL`eu%*maA#xD4W7E z+B097x+$uBngTx&q^bj~LD2^}Ub+HSEH4U^W`UnTzj8=cO%~`Uens`7znLEad!WCO z2(WnKH}XZZhxvPoz-qUg6+;+jnK!+_qM4dqvQ62jyCv;mX@$Jx!T@Qxm%Q3f)9j~e z62pu^ULT=vzSBBI#!puR zv=NYA>8~&0ryKdQjO2=@S61q|l{%eO7$nOLPOlEnX%Xq`Ioe85Mx)5k?3Y@4tMg@Q z&&$(g-9gFcAmy>RWSyR~jGHM{-Tf$CMUb>4L{|rKBJSh;81Rva!*x$P;O&bev2r*j;Abv%2L(+^d&_-g9Ngq1NjBG zL|}$Sih(1o3(+=<=Xs_KJD%cJh8B7nVMY!p2+WhaW=m$BrPvoQ;rG_k)leVtdgjzf>o74 z@}h&fl3-1~P?ar7FOXyv3p0v?Gb<10H%N44+|;ZvMQ--}7cb4`$MdGUwf98HMeu_{ zRfSklhY={R5v5fMWi?!B4Ih41cZ_0bO@yISENk?=T`pF&1i|Y%*%+azx?0{zU7>h>>!%ib{A>i)Sp---vCh5_c6WYd z5qKM(AXB0fPtX*FX~{Y_Z<=1F>R*ke)^#3|W(Ox{1*a4UZ|6cFc~D&*BFzU}2v=nX z%JmY3R;B5z{cdA=^XO>`1jafXm_tjBFNpMm=)*wnMK|M%d zQbBNPF(zz5N?A}^c~FY+psebcrXfsL8I)WeBC8ITR&cLppUP>@eK2xwo`M9jw!c5E ztuaie;Hy&uR5H)h6p1eVNJc@JJX3O86`q!k{SnIyyfiKLM54+TN%ev>r7%?)uFbqy zQq?gqI!{rrt=95Kk7M=OA*zfJWmdQ@FG!OqQJ`8iD@2wJoDnM5g`^t7q}dVjqR`X= z>@!}VOf>M8;#_t~$^EC#W|kI7rh`JQD2pe4Wxo&haDGh@VDZGS>Dy$FW&K6qEqH?N z;z2j>U@RyIo-kSF7nWwN)PtFY(kCykFh?iF*?ZNoDP4(kqXoWFNen z6?Qu#^t$FyQbx$#Y{{L>u%xWeo4U~J+JmOQU5ahwfyhRdu(H%)c~I zkSx4doUa;qal5|as9wpxbCY}PX1G)pDb*fLQbi`I58akWq^J%lbm4L>o+A~8qf$+z zToZgV=}dM}QQzZNlk-z^i_>P)v)LJGZDV3qVT4qJC6B1IM^ySqS>`dRAv{%y1HpPi zZz;oWXb;`Vh)T*l3 zZ5*3wn4D;uo@kjEZyXtJ869gKooE}It9vz3`E0avY<|#0%~{QJQv){hG|)iD;B@Qo zY{RRmmXXDl(fRh#+0KcD=5bT~>&5o*rRJd-Y@)e;yz#|&+rVu1xUFNqZBw246M49&dX+**-YYJv7xfI^Q}l**G-W@Orv#WU_vE zymff8ZD^ure5Uqkf9KdN*_m0>)ab-<( zLHcrrdbLDB&47Z7$CicX)+OMDAq#09HfEt-+bDQOyN0KPPOqY5dhU zehw`&#+FUPly!hwCTqX~OVs!xncXLrC{&|hB?C6<6}=nCv&>=5CNl7+$py+XZL&?y z0V^$;O|OH;>Y3FF*(TOMyiAS-&sZ%MdtqLDG&A;ecI>rz z5q{*f)P&7I@atOxQPi%YnVpEoT{ zQ>Ibt;-u9&X|^p`saZ2MIJq>13EONPv)P810V8Zv)bgZhX~||o2_^mnG%#tV00L%a z=PApgdF+L4W^iU^YGPq&X4y7rUIu8u1P=+qjBOdSKD-=eD7?Qgxir79Y@WB67OmC= zEMTMNFvpKik5A8;kz$yionM?m*V8e}(zwL}B?Q(sG%+_Zy|lPUEr5QYH$7pcrU53F zNxkd*5;eVGo;Oo7CTe=xL@in^W0MPGud%8Lvl(fN*H&tDd3n@q8n-M@nW)iu3$V_J zb$N8z_S#In21c=xJPW|hgv~ldSzpeMKc5_)vMi$is@Xyt96#Y&M(KiaB!GVzHRbX57awTFjV*mzJ#vXvIU3+ z3p2SFyYH5bRAyQLHLw^NSe^`%nGCauER2Emn{4D6gL!9eei3v8CjKR?c*#WGvXY}S zfrGKF4E)oCEDr{Zs+O1M%;wo8%Yqq4O}~l1lH;{vI1ko}HIW8tANj-8D)r6RKltEW zu&EuS{@_)xA7CT!b47r~6F*lZ+4Zmy_>mC+s(9<;eOAYO5TJy4;5G9ppaIu^GH*Kn z%DiTv4D%+l9@i~03%t)hxGv-LhhX-H`O|l=2ODDc_J?lkqgbrizo4JC zW55s=Iy;1C`yiR9w+tslc%Kcjc;bDc#4hu}Bf#Q`4<2H618fAoI|8l~?N5)@o73~V 
z$cdl6Ep`n*Fap36W?Bz}XGV77#1^ZCx<7Ee?BZ+$) zBAXApH*Z_}cfg=uerG<|770J`RwUt42K2!;&aRk^zz2>1izhyCfZ5fv5%^INSd$%i z*E+s8Rq^HmZ_**(fi`|pNWk%7`LZJJ4UvSN|J05Ef7T2EJtW5ZMT@ z5m*lqU^>*B9hM+)?10Y(49=EGk6F9*2T~$1?I26ynYILRJe)xAw^CcX9=@_4xDU*C zvnzfM}M=fb=uBdX4d8`#>R$z^!nd33@n~_&p@$@u@PV+z((Mw zMS$s0k9rO#_dpX81UA5R-Qm_jH;Oac%!|uQ^HvLS=3tycKJ;Yp(~gb(v^@f@)3(zU z24A0Z%ExldS7|bt-bxn!xG&oEQ&=-hf`AE&g-{USA{<`qq@Wc3r4)txS5O%%53&R(4H%>THmtus-g5{OIi~Zs!UBek zfElin#$aB!;>90q49-~}6oM{u3$c$iadIMXqepc(~JOVSp>>-OM-v29Omt-TrMu3gLPm2K2UXuBSSZ&jiOC&f%htBVG z3){9YeEa?*JN6&j_)Yllb_VX;7v(28f8v}hS)N;1*4Ebf{Kc!;*~OJsAB^h4buhvU zdXOqqdPm;}VZ6^WKkcZ0GoSA0=(u(3R%mFb2mJ^P3_Nw}RDOQ`vuDo`1I+ptFJAor z(T~6NxBeCbIOgW&`uh5eMq_Pl?cm_xk5)V|NP<9Bn=LgpH8L{t`0?W>Po9j8jlFQ; zLRwl{Mn*<&Z|{c#6WBD1Cw?<$I(wjO1lS1t-XmZ))+Vz)_ypjIaTDb(I=*@Tk?r0m zHn_!a^*Xh|En$oMiOu^Gx40eubXUZme-ZfEH=*D9#|sW$kG-J1qp8j*?``ZGdOl{J zr{M^u5v<3NB0*LJS#;z{7+t4@t_ZM%LYBDiBKlY0*+;@__RZ)fwka6 zOD2aEhSg4?)BYIbW_-$`eL;5g5mIrDG+&A<6;f94-5=AQ3XS6yfrQI zBfdEnPyC3Wu^(Y0z(#kUm=4NsMCq*tPh}s_V-9 z%(CXT$3r6vw3gE$yZ~4M|AABqLtro^sq~yg@3t!haP5;$cF&5zRJ~PHoZA*I2u0!U z?ozmGaF^h2!98e#y9al72rj`Pc;W64+&#f1ICSl^_r2ZyR%6uL`sbQ!$|ua=2~!## z^ZIDMoMFW%GI<6h%Z5NZy)Kbwi5@GV9Sh8y^4;~euVB=PiC6Ns{s5@JhnJh_tKHi7 zyPi0Yx0`^sX~;jvv(z=(2AZ0o)^L2=xm&O29=eCMZ;L&U`*C7;^CGle^t!W8mQjfT z8p+7bwJaFQ`Z^m4dYjsM6byJ74tSYn+1D|u0?~b$ekPs6~Du?Q!w~SQ(E&Ia= zxw+q2kxsw;lO-T8y(G2R^zWuu+Cy(HUEl=4;N88)V{RrTQ*yL^ zSN@p1P!fi_fo?o8m2p};SQOjm{8wstKmIHv1xf-+!6Xn0(}%MiEj*gS#2WLxMVEaN zgK@CazG#xoexNIvcdAb8_cwEjFh(hFoA`ms55vY>dEl$|C*kdU6S|BQ??1n4YK?n_ z6w!&GbPIp}e8_g{e)M=h-{PRm%FA=Kx0kYmVMKbt9z|<2eEa8stwe}1G1ZKW;$B8> zj;1pY#Ay_jmDAGFsGP9K$j}z%78eKWkUzVF?<`z*Bd0~4QKC@yH6RzWjyO;dr4ni# zS}-`Cod#Yb>RM9eh%epxYCt8PvICA6@Xs~vlm%uGdwt1QxCAnDa1fW-jCWtgZ0@`J zCJzfxAR3vOirJ$PskI7%DBS2u??R*CZhl~wEG?-~m@aQpyCYQLlZKrJf7;p=y-e0bDYB$!Mn_7 z*jCA>z`d|;j<#-&$L?8J0)LmVw<~GrD{0T+Bi*>89mRX+{%(^+zL%6-f4-KV#;dN| z>sGCe1O~!W{^xF{H=BEF_oQ3hqNdMpKOLT_MLptQRt3Mr2c#Sy4{9uF=?xwJuQNTzRot2K@t#4+xXVon`zQ6me(Q&z30GE5a_Z`Zpn#iFMlc@%+!WEh z+lYrj0%Qfy0k8d{FOu7c_5h@@o=3iSy3Kc=fQt)*=^(UuOKx>K1arB<$u@t9D#iw$jr&%XMmWO z!K3!gESWEWxOwrlPdndWQ?0GL9#({7g8n+1$t8Z`Dr)p5j^#3zI-U(Jg6VHG z!)-j{W%R_oaMm9;`gs<)8(coy=%dByzxivwUA)GFZiw%5H-F~W;g7`Fd&IAuV>VBo z&G4oqoT9Jn;QdVh%1?8C%_D78T7S2)-8b{o5@{7#Ryj0~)P&3=*HSPBd>)((&Q?Rz z;R~1&CCL}6mt&b>9xSqr+tEKx8C5XGBGT8jG5%o;$}{OUxB6h6*Jj8{?Jp$}1nD|v zpi2Lyq~KUrp;_<|WI-1Msma~m-o9+TKL-#dmg?6G36}E=auGzRh`y%mVBsLaHWs$G ze}GdF!wM?W=p1eOt^0LK;A%j}*l&W4miR+t|^@mj65-1Yh>_Xs>62?yS49bn!WW2i~yi0Lhdq;5~7OsGyCX= z71gzSBeZuN#3%&WP<9qw6Ep=`V-Sd_JrelW!$NbwzRf<4#GIxT`;GQ=P;Wm$9Docu zryPWwcjx`;4TJ(&uv)wZ9NeNMSVl|zzR62RuFckLh5Fp64CktiqqteRskD>Dnx@m? zX(P!5adehNUIqp{968Fi_!B1I zl`A5rhnDV!`3yA`$l0&ec;XEwcm*s@pLm|iLAYiv5K|2ewiK&ke_Iz_TA79fB2&_- z_kV%dg(-#%Z`fUxcF9IJd zILD9PMNHl((+_|}yAmG2kPfVVkbV7cRv5fchThR@761BnlwEaqe*>e!K;QDT>ug|GRKK!d=+41Ligwe$D zVKyf9yONT%I0P}==kfsXB`8iZu;fk4HZ`)s5(n3?{DrI{Fs%? 
zlS6~#3d8OUhlq&ibAJwd5I6ytfr4Qu`0>-@f(#Fe4$2X{ck%w(gDwQUe))T`3i9)v zuQ8+71Pt*IhSs9Hp(_Y!+@8m+U@smtApZ=34v(o9d=q_47Bz)9@X|{vn;4+1n*0&$ zzF!(={il$DGz@O|&@$B#`Y{_KSKA=obI~3?seS>Y-GI&vD1^Z9Wp#)(8GzIbS+1M9 z8)2F(@zI3sW%r}so&q}*145?wJMKrUa&dGTC5lc2Ir{LwfQ2v>*Sw`f=KF}-SIX5- zRF6kF`z)J$e560yd(VWn2kAxx&o~j^@2$d*!q`5^QUZykBoJQ(>7gNa6k;i80VOYm?=6EHSf$&G z&HZ*6BlCqOeutf+4}2NRdL13~w+N?Sv~u8Ct~coN)Q_^=38uvO(#i2oo;hN+*1X3epFOcuAoPrun*or!2<+577-tB-9QWmDhAQ_t86PlD9mrY znHOS25@quOEcMyh*@^VMNfZJE6`@Stfys0jF`p~@DvH1YaD5?#GT;<;WB9!J`T5+i ziuafN_pjRoODU5o@Yu`1?28B7{L9nAf4cj+ZYnSZVoVz?GTq)+om7LKv{TpUxAo^+ z?OLvUFFOBS7vJyGnGQMQfX4_h0=)prpiw*pxgJ}7pPSjxU$VuqO>G+^-xHW}AQ z4^0j@F}ONdF*r$mj2&2Eu=94UQ6fEhVR}5nT{O?ra2k|>36zB=In&h#{ZlXc3o*~b zZj;HS;DucOr54Xm%fG!GqmNF`M4UV2LLrX?9~nQ|a{b`*6J722!#t>iy8Bva;mR|n z97I3>9Y)tKgAvbkZBoq@P!rG7jC=V_Odb@9WA>(5uw(@s8J$C=O4*f_mD@>=4_D?di%U!CJ6NhhfVyB&@pwe(qi(?OQ@&HOEG&PC+Y&`#d*Yk#-O!cVq{@d7;_WAzeUG)9vUCgsLB~5HxPg^@&MZlpt zM+yQ+>4ua**f1mhIsb(wE;Q%SYPnZ3VRKqBzZoGec6zJt=*oeYeLqAU`+Qc=caPUc z+JPUVZIs%-YWT!~3?B>+acchC_I}0&lhm_ca)vtRNj`Nb^}YK$Jw-b~Kd5p>5KHwt zf*X0xo@HL*W1gBc&|)CvVJKymAlQrbE|L|x4h#q+Rn-r?xr~SJEWeGe=xYp6YzwHK zWowvumWx{|bd5(iYYnOp6yPIhoSGCl;`7R1JN9#N`svvCu0${F@sJaqtu1#tS+D0K zjDQW$HR0eQu_1`Lj~X9t!}yw!F?dy8T`lzrDz%%~Rg{L~`nnf{ZvsmuD+|fqbE4XaekFf`h@<^s0cFCV*b? z()WYegoU%MLIaWcK4DJxNYCp`X4z}KH9m2Dl7P7=)GdimE?+v99?$ad)sT@yt4KGO z+@Cg9Srl(#Mz*E!oz+B$axd^xkBBqJBiShFDN6}lVQo+fP@M@LK(Ukw+ze*Fl?#6a z4h0vh*}%djK{{yNBy6Ys;q*cR9*Xt^Y>fd3TI9>$Zzt~ZID)Vh<*Y8ESvkR5dB~^J_o0;DClq@S=52ryHMvEoI#GFK8@NX>I3LhTA_+dluUp+ z?&X?{2KtasROSf;3w8qL{O4p0WJIBJf;eLf3Mef20tQT9+A;%$RYsqq%6q^9J7dG$1IyKdDpdve{z z7960VW9$RDXsN@;VJv2V3mjj)t4D$F_aj(OX9o|J6MKu8& zWo0HQm(vy$L16+~mCAy#J`a^;OmhaAvqLxfhj%&u_$E#~)u;B7suvjXiG(#%l;a69 zPm6@M5S{w>H76&6R^JrIkHboZcHAqR9M})Zw02)Pj}m5=T5Lw4vt?9SmqU-~K4B1e zOjV&_=zjQsa|w9u7g@o6UD@QPCvNe|51R%egiNn17kyv;75LFO`ur3%)r)M-i+qx% zQ`!|e(R8i>(FA$5BRIp-6pFlnF*JeDge{P)XdGb1^z%wf@d*YTOrudJrrBaF#L1x7 zeHBJF-0%pBF}_T01&n#-tk*FyQ@#C~;a<&#R$$hX|H$d>Pvs!$Z=)-cb-=+?+Y6lf zWULW7I&nO;AMwZa+)nzyPV2;Q=&=u%oPVhYu9V^sCD;WPb;77EOM54O;AJ65G$(56 z;u!AwZ42rDKc1AwET}z&7#y5nDW{7v_jd=K^C9u(Fi>!>=dE zo4KE;PV!8e4SsI!$??{~X27XmLGw}(=_CC#$7F%^`u0oLQ#RFGTEbRs7kvlBNg)?k zU;@mMmB@24jPtQ*A!lx}iD?+U`7P7RwT3p}xJI63#STE6%@sE(jwC)Wahp=<44?}? 
zH|dMfLKl<@R0us81H^O63L{^YIN!RuP0df)15jAbN>k9jeUa5*ZFu96hcAO0)Z^-7 zM?z6Fb&r@KE;dScHlE0KPc1cf2t|~<;3JY}r%z*sd1;qZ)F(8U=eZ;Oyw`tPxp(#B zxZZOY^oevC{hxg$WZeEu*2LjLX?S9H4%k1d+K+2(LZsKEihZN$;}oFhP%B~V|I|RY zf0+vNR=?&E_Hx4e1~LwFSoT4&h4>4C_&Uh9ev0liz7CsmUS$$z73Oc@IXjeXpX($o zI7wq2$vRbRhir~&gXoF zlP|ctCE{_R7A42^m4lHQs3Cg;?B@E`%#wtDC|!uV)ygXSPLIW}Ox7>zqktI0^d@Ub zZ9oM(Q2{@n!r^ms!cY)$jx{=}PsANOs>+!z^wg16L-eg9SSt5~@bi{%DI#)LnLBXc z6!DBESor(9ih_qWmhMz)Pvk%IFY)P5?g#(xoE%A(6MAN2ATtw6BjszmKD!#Vx#m*W z(%?bY?}?{m9nObuKO8oEF5eyY#F^VBg};z+NPt+<4cb794 zU|t@SIzk|WhnADIh*+OE2bbuz9WT2M{HwL_0xZPn*mG;h)OBm$4O5HQ7~%XJN&8f| z{61>J)VHbNSZWdhzgssELl*~&DVm?!x3}>A1>%$29?SMJ4c5JBtPM2^F85IZ9da0$ z#-~s6jsv9ieX6YcEp@>si7N)U=x}tQg<3C zGCdqrldA_z?-nkgqL~l^N0bo?O~m_oXsETFQF;??T%2TyC(I_eVmT2hrQl#93C4hJ zAf|fyl3AEu8Y#|?Rj$s(Q!hksBd)IBdzhY%k5kzL!$o@R6$9|_qk`$QHgB0A59PN` zyt+SD|1BdT1NR&=Tv*MlkPn61KQe4QVyA$|ZpBKaP>RV)l@?~2?pEqfHpkoS&F|f8 zZ7=H~O_G_%sbGWoJ4c4inQQdMZ@ku5F)EB9-g|7~dywO@2ML0CIgt97{5UFr`hoai zp<4vV5CKlmrfzH4{0A+Jc(ZQ$v1cB7P(r z5fj`QKY=_krNpb7Q}8?n&<+-_O|Ttm5K~2iR^>;TVbnE=wBR1G4Yh`>t#3uR*rVlG z5%_x>nyNn75HG;>x5bEmVnv6H`BSz&JnQ{ieE5P9Fl6cR<-|`xwsO5ROS#LzhHo73 zStB@dj>0FMtzX$mhWa>py5vxmmfOkWEb+K$^FAKH<}M%MKga5vPQA*uUI=&kZO!I7 zCJLeti2K)Ht!30Wg4B}Q7yn?PH1qmwJjfK#;0(?ha&x{W1w2pR)7algOwWzfcEyDb z8~SnBW$96w^m%QmNJhEIDDz(haaI0Z#n4X<(1W-l+99$KS^!1h9gc4q{A?j5M#ImO5yR(lt`mA+G6G) zL7eRDS34~@*_ibY@ppbJ#QC@&5<}yf>-aCTEfYG4zmQ#g&%Y2v z2(9;;PK@;_YWbPJyzBBd?`Q4tu=Q6sYtM%}nqS2;M?A~CKMz6EJ*T6BoCFk!dPqjr zI$ADe5=G?SE;=`jBb~m;UM<3<-GB>mhunz_UHD#C2Xq|nE4EQd0h^yq4MQE>K5c3_b+FVfTK6zMu!P4x3VHL*;nMSo4P zO>0op_$6=WmD6CVxLo~k8Z#MlUym3gU4RY1WFPJ?H6w!|Ftc6ZbSgQtUug1jarbiB zwCc+kEVDlp$aafsm{k8oxW@S_gu{VnQWw809ltc4<9(WSr4%o12p@k~DeX#nZGeOa znqSN*I$XeF8IN+IKprKdb@kgIV41U3aj0{}j*|@=i7JHPO#`zOm_iO|5I>7cbz%`8_NO{HZ-0XCH4(A@~<%3v8UBeh$$M1fkOzd zT_71-&B9k=LeHx)1yubCQzjm}5~H;df_`=wqvbRx%rrc@bi7#$dlGW)Wbw4(5rQE2 ztW=@~wGbRV#G#MGE8pHMe=l~ygSO^1Jc+}|2^H!)k#_=8hl78k$btjm)h|WzA3!Tch0f9{v#P-^ zqI-U;^=lCH?GIY65h>fTg>E;?_UW_U3Ozd*vPyMl*9~>#Kmv-dsi@)+X)Om$enXjz z*J~9f?H5COB9E!WR7C$v@{L_kl&hOv(2hsAnYl?R{!fP>PEF0+5So`pYjp`zbG>j$ zAxm>1b#pgc+m48rwi2mrY_4r=S3@a0t%^)ub~ytEE)EaOM7;^4&x7X1a-q%d zG`>4CIpT{lUSUOB@1(g0=9-hX&Yg9{ zGRu#(r8Q*^zK125G)@I0?Bd?y$h;@sX273P6i+oN@azjE@=(N<18e3Pwr>F)Q+MP6^%00O4?zduB- z0Z*F=*?UJM33n(Z)vDEm-$cw0mU%T6>HrMKI&np)KKBh?^VbhaUO7c$&(BUvi{!JG zNJ%{64Zi0!iaWXXo0BN#cZ$6=fY})sXvvLY*LI-%Fn zynxO;IwPak5?6?mr7?@md3H%2o5t~+8jp}qOOrhFB2whfY!il&3!j{pAW4SMFBgT{5*j;Jr-SZ zYBX{zhwBV@#VI#)V=u`_Z46;A3x+>5nKW%6$mc1CR0>*2n(3>qs_FY z%+RSNGZ}TO#Bwo`Nefa;h(v}r|MZIAKMe_F?~Wb`7Q7lja=X~3G)$uXk+0t-?or4` zq6*c>yc`Z+i}&!2)qaD#+x9(i^1R+acu&p4o1XH~UzElR2beK&8^Q&>WUcTJ`t8PCYM*zWO+{uhM7LCRvnP?gm8hOC#f6fhQ1XxP$f1fmjBqPV)VS` zgkaW-s*jkme$L@VR(_=UoK&p+jpJ=+TllAW{k}(`Q(9YGbu22w&2zSxBjdy-Tg!X}5w1I7WAoEnRqB zDY&S@NH%qEP#*O*IG(y;)-`MViE2js21g6JLQBXCMrC6{jqj4D+1DXc%y?w}0Qe_b z66K&Ua}h$m^ie47;e0mHPJFw%Nq0SmRHF;6VoCs^`ciNR1faLiH^KT}ac=wpF!ktv z7oNKf)-rh@RgS58zf}eV@w!Nqz*()7h0sA(A9c0MC%9dc)NqR=M~;@gyTN#@bD5ax z`d}L2oc)3JL(nm!(zm$KLr{(qgio*LRpm<&QXuZJbn&>P|nRt?Tw6t0b7$s~?y1IF{zIXdI2%32^f0)fcGXWfwZ?s`|l*_F(l?er}4buEZ-SXH@A&YcTAPLNrTE$Hv5L zOH}nS%PSM;ZxWWaM_mD4R^`=D+cHUJD2u$I7JSG&meBG~jj*_KSrg#enBoYVvnk>` zFhsxLd%)2t{D04MA@wDi&`$dbkDO_9TiXi#GJ`JQV|xG2)#G*!JB{fL zzQ2zzmBMdhMX)Qlp8u>>j?6V=jE6AU1A~$LgXUJNe_zK8tNLHbQkcO@Z1>I0Pz8mx zluU2O60GQ$w=b4*{^GjgUb9R!N^68HMg(*8 zF!GEDi))Z(d4MH&p`R2OK3{}%7nfe_^!~BkX~z#kJ0>X?gl%X z72GJmbM?DVu7XuNldNj`;{-giI#o(4*{mkH@~ARvWgiY2=};z$q)8Sx#IB{1kBnPW zraaorm3FqWTI$nO#qGMrN*n0r#{A25ycIslD1&g825u2YRB;JrR@Qb(QAjIiOpOXf 
ziDIo2a(gPxYzZ=ofN#Zl!Us`~?;jNO{8hC{D%fJV_Gzq+q^$QtM_tt#UVv^|oh@of znpE1FK)aY=i`TZC2Kh_%z_`mZDSX1YWP7a*%s$Av_d*bUrRTrK;2tmN9*pf|7Oi!N zR}j8xw01-0!TQt+wVL^&dWaxashz7AsFzJo=Uz zsU|K}Dg}4S`mvX)h^VoEfcB$zv8i{9m2Zo^bCIuer@Lp9zgg!OUq%n-4^&lY)GT3E z8onAHJRP^AyXvBrA7?6HN}>{OU&I`CBBQ3(gW6V44%)BU!k3bpsj0Q7LvZ){Y0ari zxVVO-KW>Je=&v;*NS>McbTwK(Eh89AmSPRe=gXEUEP!F?p^BA_4!#SH%CeMb^cV7Husna@n!bS>h#w-mC2F! z5SYBdCDT<0xHu+Nh?I~*Owz1Y@#Y0cTw!ktn_KHdg{efnQI{p@G|b_#Z#Ox}miO8C z%fo1IZu_bS|3(#=ec^)jc2hMa8&2q-PoYV18{8)}Z)}*twfZzN{ZaG4c;rWyp=tnQ z4>v0xsHNaoRXbHxi!{lPTp_w&9(FL7Ey1kZm)loU567+??|{3#3K^a7Giy_e)R6_r zB7HA?&ID7Z(S~P5^%I02o1%@Kn)tP{ebT@PtDpvb(q6Q@qRn9CF@@@m6!aB&Agdaj zgQO-CCTA_AH%pd@CPR5|Xw*9x{Bi@)DT5VI^U9P(MvB8rAJ%S>+2<1@!^~S8)6oo< zi&nTmg+a1#6U8eVqZJ5C0?6^BB0i?dyP;28@Q|e84id*$H+*qQ!Ob0+C!L<5wd349 z5GZT-6FiRuxH|~aJMd?yys->r&TQ^e%;grgHtav6N!?>IeT*dc`_e%E9}@N3|AZoj zJhSDkI`BNIuQPR1lXNp5A@<5dab%hnX`i@RCYdL&+xupPHjvar&sA7H4y**9lSos9 z@6}F3oqYQ``3G5L`HwW=mp-E%iTj>FAkfpy1@XsRoi2F5i&;42`<(NgfPYpeHEk{- zwdSvF!(UZ9Nr{PlI+B(!UrS)3VcD#|7%Sn#Oj^WF8QJ<_mOG)1d;RvSqGPV0soKN6 z#<7f-iL+y*ZTEKcPOdFHG;|8s>{JqDFYYJAog-gpr*E#blRWmjyrtCi$&gJrm+Zria_GB>=<&BaPOcVR& za$SGeyVD&SsJ5~<;<0O6=fca{AWWOzW2*|z`(EHgP$0E+!>yZ&n_@gT&YXPwk(Eb; z*-Po5lfB|XKE`N zwpA`3Qv@TPitq83NQeJ_%c>$w6FU2&8-uH1kJA~!bnAqmKaJ*+7Yy+xcn0a{@I_{~ z#qK6j)qQfwF<#NIGRUBtD(Nw^ygB-rUt}z&!dv{VU4FMR&zG?+Z=YG%e(-g5{#p;H zIXh_n`}T*lc8aGbhP8`4WtMi%9l2z(uVe7!UIt2mF+Y|>TdU|pN|imz#P67DPuI$} zce5jK$q$|MRpU?g#6LGVeA^r@zdTn}eUjAq0I%{(7ur^x0|6`w%D3W`Swo5(^2}Wk;(khD*TUT^iRFV$31h40c}T%C3%HhEd8^%(~^xo z(!cS7ipY2<+Wgqm?2G=Kp=+cf@o3SbqAu`B;oj#uko;T_@hx$K+SC8TE&!fNta1oh zT7z?RMa#AIB~x1%UF7?s*&N&y6+K8QIZ)(7m@9?B*xqxz3O}w=JwBhi@!t{385lRNw;V zJ2!QB?ZQYHD6~bb;PYXXJ`K@!q|CD1FuX^bRQFXrjOiCPpp{T{rmWu#J+U&mk-tb8<;rc0Y2AG}*pRR! zW|V+QDe0cWGPcdEZAwGYYm2W2_M>YRM8W;2Of9p=HR-s1O5om(JX$9FbRm|}K(Z}U zZWkm7YrJt>_Tx@F?Riu|py5k^Uapf}O@8$zsAh<-rq5Upv*5?|;0!lXlS5z1(?{tI zF}Y$V_*@F_&tDon0D=h2;3H}+mzDzgqDn;0rpv|2pR?!- zZYDCrip)c*d_kg6rh&aWOXg6D2yPB$pN5Hq)UI4F>l%nWsZ{3~AQBF@qyRtU^9PPg ziJVQk8SY3|770lva;M7Afgq`@a1%e|UhL7(e5gE%7b(^d&^cnteb^Y<@<(UW*b+K^ znexw|o33v8f?DcB>@4r@QmyhcI1MUo4|Y$gvU|eI(3T_ISP+p5PS8V2DWYeeiUukG zE)NePXGDQuJdZXLLLJP5r=l~=#eFp=I1sKJ3_|j;6%v0ygfJ1ule7On(J)ALN#+TN z?S!~XOoa9{k)?+TQB zFCX+1qSG%7N1%SFL__I>bHuQ92~a6VUZ2GFhSK=iEn9t&YU7>zhz82T7|#xA?lSv? 
zj<@$`mA@Sir4tN4=-mhJyr5)QjH~*&(kacxU?9tiNCoE;drNgMw%KH|*rae_ODb=# zJqFaejrYTE$6!s@Ng*X_9oWV*w8Zl~T_5yqI8Q468ur#B!dvXs^B`4N-ztPWs1KMY zs}CnF3^UlSoRX_G@z$#6bT*n@pQz~_D*yQ~J)92LxRtpeb!3(A* z`YZ4fc#G}Y{T;6Y`bQD!{gZ?ndLArek}QQ#DA_axO4iH^KmfoFB+&xttUR&`QEI#I zHu}1OROY7^n@tfAfF}}Z(2-aPc!qvV;xNUvveXag6Hh34$1Q5;2^S-_0G>WAk!`sP zdV=&*Wp2?z2Sm{Q-wZqfumN9YEIjW=wiNO#V@Ac0VjW*Ac*2EBe;6rHG+ zMqnBSv4ju|2pT1^`3{Pu5DZjD-_C;~rDO>mjjg&N{X3S&V~kOi|L{1i>mK{>J`Vy| zBBQ?a_v}l&C?5<}+B(0NN4Y1YgL=PB=W|IJ0bpMtLdp;@Sd@7z>7hc?tBZ17nVyy&*d@?i`)~HulggG+l+6i>5@aSx-Q7 z1&S!N&}^gPlE*%Gq}G|x0ZbkQOa+??Q;vz_!40D_qg#g0M46?!P%SYH{Xu*!8i;xs zDz&-24{{X_DTXF^K$M1`K@@|5_d=!Gvw_v=)w#b+xqn~`2G!}9DAkEV*c(TnEVli7 ziBJwnmPCEEpG8UVRr$K_)nPxPI;;0_bu)gK@-P=!KvIuQy%>sG}4nTn4c}U4GlH ztfVOPR7{##AJGIwuUT7= z#SYC!P-2)Udjb^j-&K{r@(0O#h!F z6x9Lx1M&j*uqj~amUz4sho+-k0UGzf%B@&XL@!LgN`l0s$6n+it}xwYXlgIEn;5TC zKXxvSWMQ}LI<<*yNth`w$&VcGiCt-Ys47O@zR+kZq0{DaPRhK{@Aq3ge zHYMgR{C~sYa|9Uj;cI-ovQ<}L?-5oLqHtU=V3^dm#7iyam?b5bk(n=HE3{EGh3hb;^P zk9!alCG()NdBGV14yZ98I9nfV*PTq>jr(F*~+Lu6=4J!iVW89hV$hy#;!M89UKb>qr`vZLX zbp9f#ZWaEf{gG>fJ+_V5-W^g$$}I-%u{(e*Ay9!&`xtkQPqTFemxnL|xuQ>q>NZH| zPDF7D*BIM0`l8TF%EFhxr|4gX^v9w3i!qb-h4$J^8~eF6_^*%CivMqJzvPfI!3}rb zFjb5Ls_U{WTr*3X?h0iahn(w)Uk{1>RGd4n1&ukr(7(@eb*Y zN%FKvj3j9;7}JkyNGhWjG2UF2zh@HFS!YnJ!e{iR9fJqh^0A>}l%cNmp(T`>og5?1 z0ZW6H&CMmVl({c3s?HG)9yHzL?8L>VtJJn5f9PN$nI|-e7Y{hX2!lToUq`qX2Ek$c zmLZzlCPbB*fr!YAlE&eMxKiPk60+enqv`I5if1C=NXDx{A-f9dS&H{QIRv1H0^G^} zoq<8#2?+NV8XJu+c-@ri5B0^2NpM5vWQt{05frSlb$X=w7h2Ar2Em^wt`Wqbf3$qd zGc)BEUk&Qhgh+0QKeM`W!B--1+(z-#80rZexw}8@)%!<{b%~M`RwHpV?aGiBeZ;Z& zk|O-ZUO#n8k+#N|;l*cy1PVrRCJQCAktRlym#G?=8A-&aP2+CSFBggWkhw&qScS?V zgfZqFynr5jVq}cv&@mwPA@#Yk>L(%UIx~R5S2Gq`;lcV|Vd1{F8saM4w?C+tJ1w4O zu|XcCnu<`8IxuJO(Im8|a1vb+@h7Sqmws?!cnKW0l0%;wMzrGUBsP3>pQe$*`QR=c zzw2aSzTj!KJsz?ppco=2q9RDUqVn%h3bK}vB?y8@$MRMoPJ-zekQy)Af>%wX>vi=; zb+n(E>|Rq*-+o*5F-KQGP#WQSYuTtA(#{n*o^Jsq0ajWstIkzNwdr&_WM=~U+4b7aOyu~ zk`_%EB|;nJ)J>k>s&K%KBHdh|b{El0KYw?2Q_CnsqzAU4(DQaS*Z7CW6|DPEu5|6* zmKIFN1VO&QkDc*9p4V^ykD0ERCaGHd@#r4}2I6ZI5Nz>$!yq83&+I$%XJ$^w44e8z zudZX_|K?30fYUYhgdiR=bsRaoPG3 z?e3!n&P$ce z1kO}h_NnM%P*+V_HR~pl>c2uF30)KtN&oDF|F__RM3To2Vi6k+`k%fAFV9&Np`f*^ zhu~l3bMFB@ORtMV%{A}S>8*}t*+oV+Lq8Acl&{a5m9PoaLcQ_j%J1PM5Tq`YAkc%X z&{erC!pXG|l5Y6V#v>mZr{;)ar*c+qI2xyhMSKE$&gz{yHX5I2x^6}~-f5g4 z76N8=Hl8T6o>)6y*E;}b&zGBxVE62=-xurO)?4p9w(O#Zz8kLVO=*#2%WfI$s+>jF z{4GJ-I^U8cFAzp16+%4|LN7LY_-ibq&L+`8+Wq?Nd;@K1)BOGoTjh5J+ud^G%|s(( zmOg3@u9?nY9{7}NZQXon$oyWsgL=_)%jaBK_^Z8}VzrQawLku=i?es$l~3g0^l-8hO~mE-}@R$tdhyB9CIkmm;IfTsHhl_3lujrUzYDise(y*u)4V2yp$;n=vr?y z^v#{<%451^HhUe{eK#u%5ZTBVA~)eG>kS>uPnk{5Zn6@3{0q^a$%wrbak zo$f3~Y`4soy-9D24K{7-JRU3?ec@JUw^yk%n}w#9(hYSVI^9=WT^aNz`bb~RyJy;T ze;Y7IreE)--UJ=o`;CK&q}wI8uA6OYX1a`y4X52}bKOAhO;rqXK5dK|VZND_7zz6> zf^4OK)5YH+1D+A3VJLtY00ihz@F%hDR^MH7Pr6G)KoW%$J?sFiA(kdmnLqnCe)XlL zkUVeOoSN|9943+jq>uH>{3P!(`_I4c26Wy@tRsX0UQI6anxa@iW`qgcE-I&0#ID?b z35S}o-&VKh4u>MM9eD~v2AoU#G>i>$azFesK2z6)KdrM6?@ zeZ%1g@fR-3L^!=BaWeA?Yhv6hACu(*6@H6Gx>h#&vG)32%#YnC^OroB<;LDl^iQpc zx?N^*97NV1sV6X>AzZQFG%fJo&%JF6jL5=yvYxY+{rsAY@J&YOjM|EZ`AJ2gb!Wke zz_a`Z+IHNl$d1bgYGTZPP7O+8TQn4GJxZGY2olqVrk0P$=l>z`B2}PfZzb2Ss0H{5 z&_F}oCrJw%gAHQ7`u??6iSToxEV)WDG$5e_;)^W)x0HF{m=8KXut53!SBDc% z9-4Vd*`4?s2qT!zFQxY0x#^d7@{ypUg7UgUA?Q4-PVa9%WI9%CG6)7pBtVFUjak8&I2@x)lNLk z@;OLw&(fuI%o_a{#+G0SvG8TdOi-U;uNi*$CJAgGyV0T3Bkg|J(#=`NBsDlQAu2*& zFpYTXWV_;)Oq~@!diUhbwNAN8FpGhpZ@ghkfs%rw9Ft#$@gK}bwf%~ucW-ORjU~E| ze}C|BPGM?xP7%i#h0?l!a|0;2amPss#*Eg(Ok|gaYyTia@p*U>D76bO%&l3P=A7Vi z(+~J=-^oV+F9N+wLL1yUdO?9$biHCsUgNP|vjnXdjUk5c&yg8NfLTPupMM8TQq;Vv 
zmehS=P*RuWzZ=8C8$0!Nz-gcr4}{jFGKo5{5!QhdkO2vR z|J%}Eu422YY!#(Z!i{8rKCBXGO!V7z(g7PsHHr`In`+GwVJ?Y5ibM9H0e0+apcwi% zUhiAmy$1imn5xk0ewP~lFuztAV!(GaiFWuc&NV1kS+lP-rC_}x`KcS)F{_$CiEe)j zQ>}>Ie(Qecol255Zs!eDoKR2sg1=WGhRfZXpb|n4JDg&Xxyvk>z(D+5){hYG8AC35IVgHBsb%8B=lzN;vD>*_QSn-IW$;N@^srr~?k`UwauLSP#_aZt<0J{aSA7D{ zGu)E|SIvY;brgLmN?%;-;oK6c?yvSsmA{b-vBDPMSfW@|i;*N3(9-)iQoavt_zNf8 z{VzvBQC@;DA5iljq*-siiySJ&P=^0M0i6h9_Z2dw%1Nmzk0c+foiHEsO);2OA4vTm zg()!1Fk}I?%Fj|bnR083DbQ*TG^iX*y-#495n5;1Kr&SJcB9jJZ~}gX>C<$ldjUc$ z8x#?SdX^zwV#=1e`t?TDe8X^pBVDKyPc@9hAl{R&T@gT(#^_Xwc0s5~a^!J=A~HO^ zxr)>NiJ_SwOEt=!7;leEF>RhY{RH3Q=Na7sgO{zegC+Nd$Cw&Nt8I!k=55q_nw&9# zeUfeRbCmX`_{5)l4pBFR073vE zfDpLb2>bz`sNz^LpDmTn2+F>+`<>n^lsQ|p5WN^}9C1USWwUNds0lae0!j@l+P`uN6xz!Ze}TcEUb`XaMe83=(~K2Hw^mzyMRIGv85lqY$k39e#>BaiVU zu@>2Evtm{#PY6KLTRPn&k8wr<4^`9f$(0138RDM9%qGnzNXyQWP%CHa)J-;PgGN`U z(6*W#9Mu?Hi?1*@Yiw60> zCWn>MRVsFP)@_e=NUc1vu~B4b5E+`JE`ij+9X7MXI@W-SGo*lU1lEvLsL*qyh6iHR z*S#{fSog*eacv!JEN9|XtClo%PK7Hl2H|f+3lA(Lz0p{??W23oQ zs^$+V*?ltZTSa59ggdO}j%b)eO75tZFVixI*jXs;{KXZ82ng1uJy_>Ttfj|O+=xUXj- zD0auY#=~!&bJm4-hZuEQsa{Zcs+4u5_GQVrS8m4-R`XA0bGOqYviro&;9GmJ-xh%J z-@0MgP>(a%?FhV{aJ{j*p@V&KW8?kh`B^%fDV*oY%?r%2a@_BUce(wqJg!%M_bb1r z+Z7Z!B4A06E!5@o{WNZSp&EOsvv=922K-5pXL=+u>)T45;^3U1aNrSu6HYrPU>EU; zyZt^O>k$G70fYcT;L8yBEMF7``89X~1QjY6j^)$WnTsNprl`Gq<)}!W5)|k{9uw3N zN$r;DQelw52?Cn?c=4FRPbl&oRZWpN#43U>;jloRmubyBeSpC$^dPW| zlA@}66me9gekQ4R6?)7)oP@L#P`ubFRpAq?N)gm9R^7{Dmv|MDYC#o~sDMrfEh>uq z6JPcaqO%YJ2myouLf|ir06kgaC9D8jzEsYaiF6r<={$o|7ZvifSpK8UW2qI0Az!HFbToA;0xst7r`4SFfBAd_UvRBz`26obgLXNE7G%8C{nQApd5w!;L zP#G8m#~}|BLLKuk_mdQ?O`I37(@gF>SICvi*A*;-W7#SJF%ZxVMgXhM^2Li{DT`Bu zDqT|pb~5Gab%mxfd6K%uNjO3eSSAX^Qa+o@W^$S9!c`8t!iaRGdR3Spm{s=M@s}YyZRfX(kJc9dzLi@J|qs4fDBk0v-+&kNKB= zL(t731P}rU0ffLGAyBCVB44RM1Y!}dW-A2D&9D#t2KbL3 zgG!q>$KkWKpDW>@g+*_LTplLy+%+`V$rp35>QN>MXr=1MR-}lJji_WxMQBimjP5|- z5b6j**3jVU7=$Un4qnMuN(Hw(uM4Fd24AT%@DIFN0h915wA$Bfu?!7Z z5lxN?v|03hVP&jT!Ab<2UaY{dU_Vu;faL_;Dm0b3DwQ*pa+bjJRkA=;3so{t5(OO3 z6pJt#tz>n>Bz~7nAU^R&PZ|0hA%GA-2p|Oh>Il^M1Z?=t247)=NgZZTP=c(*3cx3{ znxDV+`RO?Hfu6+*l<+2Ng}iCSr|L~%@0&)K2)T7u?cAHws~CybcmtiUO0Or-ra*(M z!~V^MfF(eFCRjoOZ=lbr(wBqJ+L&dM?gn3LM{c+U0S%4=F@oCxo>aR*`V!EGq*=(X zLy;7aBv{LUdjw(x90D99rJGVFpX`KTX_H8bw&}B=iCfPG@=yHLUleqI2myouLI5G~ z=MeZrEPkDRf1v`mIl-s>+dIFk0Mv3tKDN*J#3v&1X{xTVliGxRy8)m0NGxs9=U z?OFe`4*&P2Yh!-8=;XVs{T6Tq000m`g#-Fk^;;3b!38L==fc?H6pQ!Uy}Q`+Ux&Vx{3-VzJ_Y-Ev_Tlx|URv*-%3piHUdgfl^-=x({rUYx_}#r61caodqyz+o z1%!q9ur2t!1KfP9p?q%M9DnQKo}IU?my?H&le-)1uO6*!+~z z`*r$v{>!ue`}j8p{>_1ZbKu_`_%{ds&4K^-IPkj#XXl1B+x)Ti+2taj1b}dHf4#5= z9`=QQ1s@*|51)vTkl+dl5eW$~5iv0-85IR786_DpF~xNX%4^g#G&CgSv~<_0>8Pk_ zsDE{W1Hyg=5C1AY{#9yHVp8h=QK zv7Nnxqm#3@kFTFUG$1hQNpwtXTzo?Mv*#I^Sub8@zbY;%Eh~RrQTe{EzM-+HxuvzY zuYX{0Xn5qq)bz~k-2B347#y*_@#X8c@0(kwgCB=S$0z8Yr@!dJ0YHBt3;X{sl>H~V zD6w?m;^Bet2!GLqgX{l`a7sLUHX#D4JGz9{p4ZrgBd$>2O)IMHxym7;horIbnk2f; zDGKL8{UYr*%Km4BMgCt=_IJX5rway1u){N==qc zH)$md12Z~GJeEFdQa?axG9|P= zeqVJpdu{kZ^n}~vg(8#VsD?XImP}JF7|%05%-tN51X7I)O7|<9hNkAUcOFylE!BOL zsm$oMKS)71N4N1DqDy@u;P4!AaCWZ+#$qSu2-hwme zviMVz+}#@#QDivOCwVRO!*_H4!qdDa^@0z|3pdDUSWRj-D1lnSwvW9eryI0blK zCH&zc9x7kcen?30~Gi)f3+BDOUuJ@{s_5wGWQk5-LnwzNQFmOlyZ-#qxeBv@ z$~51~-oc*NT_vhW4VH64z)KsrrpSby;_k6^7+&<9wQ&M%f969NS+Y}^x;HM}oP7+J ziib#AUPDi?Dq3j$A-aydzaR#-5?{7`B7OgRU}F}7(Zp_gzj{g~lU_A_Yg5!^k8p+L z?VUmVpc~N$K8b#qSWI=PrE*BwYIl}+lVEPiq;!m`{`ZHGw@?`m*YZ7c$x3txvoHW! 
z!W-$`o1NTQQI$#w&9a<*>wE5RKpCJex3l3cG^pW{Y+##gBzr3``$+mhg6SVVX#Y$Q zRK}*09GJJiDxi55E`cOrbab?gXIc2wAuvq5c^$QFrNt7PbY}GYV)_ywaJ!Jl5TjJX z-}v&>Cw7>d{q&zM@<|%oH5E7_HR)&q^-g5KI#cLT{rWcUqA(De}C~ zz`kD7^q&lGEUA$@nI~C(qZf@d*=DhnvGj&)ec>_nnCIv4b!5$01#lhZmCqh(R$PAVLZ zJC(im&BE8%d>CJf^B9H{?&@e6#fs)AFoiC6QIyTuMCn}uPt1Cy4KlOvCrt2&{cj(P zZIyb^x0mRx204OCgCzb8xdXhdYCln;gZjaY7`8G_kM9h$DSKgc)rB}VjVXu~;@B85 zrdzjD*+5Kg-H6WYn%ig`q}#W(Z_#3mA^Juv>?r<@D)a5ry-N0j^q|kfE4E3QpBXBL z7~Vo39*J%F0J_T5e?sA)picJ-my&-D%(E6hUbB5>#fb69c86`Re`)F=y*ZBIUA?MK zR=vKs@FKds8NORz*!P-CX=I*JRf&cC&Bg2`z``$26fD=2*mt~L8GHxDw|XoPeVAW{ zI994xyR-f=mF-0%H3_Z?JWt_s>Y8%X&N;I8jRj~`9JMtb{kC0lA@#w4Lc%|1g#PQO8!9AVtcszGeS!I1 zHCuw4f$}fiPW-d7@SenQ;bqF5k^vj}z>Nm;$mC}cDVo8asLp^cNI`1jDxOdIC6E{d zzsWY+9u#zqA}fnYuJJjYFW&0_>2=5Nwmrbi5=$)+IA-dO7kHrF8rFz}t=E5wJzdE#*koPweLlPNe9167 zrDbiL`g3A6C@N@dO2~AUt zUc5gU%7&5msYN%eIhpB9z9q+Wsn-8I7G@DnuV8fu_mCtR43|$JBd;;{DK~l%Iu2$$ z41I0i$+-)7 zh1_(GsjXgc3>5oX5#~=*RsaDlje7H1b0kx=Ny$gt;XDN?vPhj7VW7?Eo5t#Lr}tY=o)&vN1LbI)YajW3cA9y+XwG)w+{DVm`uMzDmf+d+`bc zslCg~20j({?0}_w?a|(-37#?a8|^{IHWT{_j{AwyS-W5aM5<_V)^75G#VIR3reQjQ zm+S22+508tZV8K*^mex(+Y>~`m=sMwG8Z%$Qjvb+i%W7JVbQjpt|IvQCP|oeP3mTe z$W&V3YZtXPw5-_VCltO;;s?IsgGk$*_hSANg@KqJckp;XWgC{Q@H{tu+$ z5oqiFO%P~%p82NZA3FO=+^}Yh-YjJFF06M7l9Lm+TV13QV4oIQ;@qOwIvI+#`=d%aMy`;jdt{JWmeRR4}^xt4!?dFk)3cH*Mj4bLGmpU=SHH{=}Gqrdo~rj44OV&svZ zo;j|PyYTB|lMx80cGeo8?8sVH^9vkH(Pq1>F_R-~Hz>oTk-$jQV6HZ@hD z*AvQ3i3yh%K{vPx>^PxDFN7VD&1i%5oA3tIN*<9AgX<#myRFJfGU{jXYIx~^RNXZT zeiC!1(WM=;Z$`W!o(vzUYbAYsIoL8{@hH60OgedxZ*$fK$hS;cTI^WLi?^=tyZSpw ze$ZgNX^2PG4}LA1@Dpj<$7wK28qM|twdN`byK)?%{v+IKw zt9lJhE%n(lIZo>)uQIvGQj%~C4VftJ5OU1u;Aj6)I`poGaG>54OH4RPlK1Ldr|NkX zM!yo{h+0WLlYok!nn5}ODPN=KA2!gt3aA+NDW{#-<%Wl|_+ zbL|>|9wy8!36pB;A0x~hLCK4?J-Fd=1-&d()3W67YTmw6t|U|Y)Y8S$($W&y+q&%0 z2H;pkV7|AR&tHb_*YN!+q`qXOYQr3OM#&e`s{rl#YF}6FXG`(;bvXv#Sy_pdg0pZaNwYH(D=E1#K^PnQK>-Y24~wvmGOx2BN6^N&F{+`o;#5ehcfxq3 zNiKoPXN*silI*3KT6dqFdae0 zKB*rEp&|E?7xA{&UlworZ8|wQkrPv^6&a|)(fQ@MbIZTSl#+x?Y-N2)Uxf=|bm5v3 zgZV5o^?Y6*AwEZ!pnT;xBp8o-kiXB2Yz>DcmO)Tt8k`JGt?DZfQ=`Z-9x=imYkIpc{}KN zqae6In7J=6MP;5Pwj3*x$@VIo1b8WZ2j%lI7ay#^R#<`IiZy%BK-&){)$%Ez{_cX? 
zP`!Z_82+$X_2gj{72y^cuaIs*ZfaobkC>`TxlF!THpsiqhY~=`5na1hc8GCCGqdu( zRNTcO?r71OW~#(<6&(h%k@X$xTet)ltZ&J6tyEoln9qNVSF8V%INg)Sl(|7u(+?7G zIKzQ4?FS)_xE%wpTbeRy<1MAAt2kfqMoKZ0slMi*>A}#Tnc?H%o%HR<{b<|v*n7=0 zZ^d|a_Vyl3!sv5bVwlc{XAWp#)7h;9~vPc z+!TwIDeATpm`!4cU7edL7B@;6b>5?2%CC0Z_ED89{}$9`XVl{r%R@m&ka zB;1DZh{osT|IYE%-#A`?+ryMkJ*~HPU*PDfqLVv!YVDKPR~XTe&2q{i%yoGFC*$(< zI9z{KO@C8S_#7k^bmPod2l&|N@f=tVaSjA_HQ}UIYYzPy0x+fSOif?;0;138Z}*x2 zW-E2NOF$h|o(IsAgoWG67(qhB*1G7wUnn5U5)oG9t2attZOu(CeO_im7%$*TZs$6% z9$aBvOPFtNpcOw*;#BAKx@na1fQB{2sA%{I&!$-+vH%n;g}l~B8_JBVv>bRfUSaO6 z3$rwTnf~e1mrsJ<_U@HG&5O59H;D%ho&Ob){W%i0R#X5i$=(oD;C%xT=JS3Zl+@3)#YVcc(cGrDyK3j7bYqT!IvxwLGJl?}A(U_TobCXE zxW)(fL5ooBz6)*^_QqD^MVG&;ZJDz;&xw%vs0TvwA>C|kmW;Y2Q7DRLkrr6X4T1dl z*xM;I+eszS-$C1bQm%o-C@}d{voAkdu({WkzPLL#O!Ty>CGJ+C>x$afjxe}0Mg?7S zb|+XPa~q*jUIJCQj7I-$?8eGeyr8^93e6O1UIh7)g?H2QD5SV#=jI&7E{8 z_6bdnh>h;#UfiZ-xjy%3uKF!z8J@(#9>vSIAYc~JZL^9v{#;ST9&ai*nZ+rgQxzS zTMM04Wol-+_?C(;o%d0K6z3XQdAcKLi{YP)62;+qbua{}VR2q`A%&LzF=(pYze?XVo3B1`^=eK;BWz2}DEOmS@_1JI-EY}vJWmVLJqS6o(E`Ux=Wj#FF|25b5cnlP zi#~ZUakQvF32_%M;7R+Y1ADU9d{0lyAOdiZ8ZreASBWpCR@hJieav18lq;KHa@jDr zHuYz;w}nHDS=&{MJ@zYeYi`f%K6&A*xVMgu<93LcoL7X>Lj4h(EVwi6wyuOpU%L`D z81&3WIV-%`RVHI#PuSmO%F0lbb2_j_pyuNbcQyLMu3m5j8_f9RbX*@ADxhkY@+>i4 z>9x*#!&k${2Yf{D`Gm#D%gpVfNkWq;SB%!v`>{ryzJ}JRu7M9^-JJ3!hqz{P@#TOI?loVXWBsp zl3?Za4h@rMh?q2#z`9Y~Zj`fxU(hTcAI1K4L(jCc9LI6Vv$0SI&UFB6^*2A{Uk^G3 zZ*(Db#>3s1=LfT5sMFRVsc2O(ZBA;N>a+1W*KXEG?}(^zYMet?n!Y8B8&a<62}S|o ziQt9QkZ+&(%}LuA+oXEae3~cYh`y6-M(QQCZ)<1c^iB%bg87PJ$!0q58m9z;Z@3XGLMqrSe(-&e zkhfGP&UXaucnjYI8n13*lXBXop#e3SvI1us(AfSi65nBrYEVhAEb0=7meqHFkNW1* zyX|oKH^*_9MYL*W2@TWGcWz6_5*>~@9G9Xk;Hz;@p~QNq7}3I z8C4jaDH3{(kvn&D({dvL!s>tob@~mGb*8DS4;yj5Np4y~P~-jbQpl4S(FJ)H#|h_r z&e;aD5Gj6vjQnYh=#TX7+y-_I_YBU$H|of`)4p{QqN7l>-39ghsmP(Bz`fLUh!urn zlXOQ+b=hWexw%P4d)#~F+bk{#w$|*_!Qa^G6v3+hx3V!$uP3MK{8PU>taiR%A?AWE zs<)H*yzuCpvG?7GjH2gU@}bjyi32Nz z#D>~8n%4$_Mr(I!864)YSrB@@ke)ipcbujhtBJh&9hK+-z>={iSwpql75 zl#`sfUblHoxR8Xf%z625{_?;6p5#$3IVdy#sQ{@}ZxUu(Lr_e*brLviqbO5XWl0tS zT`vzjbsBt_stfCUhy?HWV*T2#7*=FWY?z~5m)2{cHPs{cmD*DTlmwD$0q zoHk@$BD;z#GH@%pt<|fH2+PE^py^U{C^$vhFwO0qix9iTi0kq8V|+D(rG6S6@qJEj z%PwR?C`Yi-d0{8lM_&|&{*)|uEtO`#$-2$%!Pr&uf?7K5mar=MI4?D>5g@ z;QT2@q%>FnIUeZ)S={E81NUdHjsu|66hs*uzw-iqu^=S{TmCD12bJy&d3wn|OQ ztEy+BeZ9XfF+Ftd((fRqDaHCRKrNHF8Jb5dz)%C>U*%{s@4_+-B4NiJqgeNet3 zf0Y03&)Sk4Ek%zjV?qUQg{A#VqO_VK6;X@Ij47a7KzE>hIN$%Rg41Jp?o*~N>sc() zT6rte-ZmWA!X}&Y!dz1m0*9V-GA}?mdP=(bt>XMwktyGuOH%7%0dw`VfO;mz7<)IIr2{O|!)jgH`NFg~8{Nqfd>SD^O zo;LA9!LmvN?s{7tE`VE7`~?EdO>FzZob;Z+z=Z%tZ$?LRjQ_i@`_ERrKm4AW7LF(| z&7PLhf@lUmQDBf2L3OFm9$67BX$F-$1vU&mJ<(ItlZh{46@A*J&cW8~J1i+5)Bf>z z0bTwEssvw-jSObcz$l|*gJorkDr> ztNMT8)U<5|F_}tWZh8yM11IWCl=osv0W0C5@+&bpy)d0;y`lK(gddJkEwou5WauB0 zR6<>gp0DF}cE7cS4~2^sMCTJu1alQG5?e5;)TMlY$%`}qj49G}Om!@!FS0kk|DoB1 zf5hFDxVHu#?#^Y_$toi>C3*wNN~@huoNP-s=MF5Emqzm54KQT-%Xi-t#rXl=twU(4 z4rN(VoabYBJtiQR0L54L-JsXnqvejC%=7x-yJ(&kc%1K+Vohl<7u;2$$C7@^hm?Kt zc6~MfoQ#YH6n&M7$AHPdyXIw=O)B{on5vWX0dki%Y3viwcQgDW$HZ#^#n5OUe+4JEO+!Y4QGK8VwO4 zIRHox!4J+mV_R6l_@i^y6<{*PP0*bquN5%TprM!I7=0pJOZ;$!MziT}QB$nj@{kE+*LKjY}zYTV=Kt+x{k>@jM3UNyidA^qL zHJGDE^M_{(pVruq$8g|Pn>EbPZDq9gT8I09^bri^*eaLX)Z`6@CsU^bdqZ@@GwHzB z&VcjScG@>vr$+Je;^?vIVN`j4RYsRF=a>3jK7vIV1MXZGobd;H7TtoPIERC2C1#GM zcKLbM?lh&*xi{V2T`78%8Nw?Q2$gjJ5Ul18*ZTh?`u7jtL6d_}pxo@dHX+6gF>2Fb z7SL)+-`o27U^jthrmp1}N4@&`S$U1tGGxPj5&WZ2$Gl&~ zB^7wd+acT1#U)PHTd%)#C^t46aI$qGGVA$IEcxStg}(hV^kkU429DT28|XXXM^hsKjEanPi$5TAUiT!DmO>yR(+`^275rQR4d$5ekE z4POiHb7ry?JMwL)G#Q{4yq9LyA1S%68=fEukUy|JC=Vx&Wu;|uW63W#cYZ3!8SbTO 
z`sXLX|HI0PRe+xJSS;9j4YT7aSv!3-y4a%4%z{z1F7tTHHiaWu{gIqD9jJxXSM105 zj>1~Ct;uw-CaP`FFkdz4f^(VpFjC7*Yw8)XgLBD`)Z2u+c^{hz2?@KLHs?u~*8uJS92Y;{JGM#7^f;WAl73*BNzAq|zL15ASb?|P`NtS77B z-a=ycU=B(i7ZU#|7}D5MK5Co~!8+*pQVb_LtOh+~W)?GfXr|iUYW9$vEm=itLT%)b zo(9pR&?FJ#6T};$-ycSP!38zydmkMjw(HP4Rh`@_E_fj8@&)SKDgEo?{n$Jyl%jr+tn7H2Mfe#x;M^O=~mBn4+Z;ZOn1T@dceXSOyU)p-+| zHNOj@x{l;8m9Z$>u6`*ZUt%-W7{{vrFPC zI>hx5PH$1h86S`6ZoCbn#EXGZe|Q%q<}p$s{{9N|-IepS{8J!gfg;=cOYv+Z3zT2LCTnBeD z99(9k3t$oVsu=U9oC=qJW@N;~G?gFIavdFE%^io4D1(_&ZipJLc|PZlptoj(NZ@dEA^Ov?~~B(j~gBXdOSULjCnEY-6PfGPgbvE$i=Sk>{6@9;UzlcE+Pg|CR>EdA?|#{U}a(5Iira~on- zb@ryHRF?!C#C{O&wyd8!Yl8Y#31umLkHc7kHQHTo(IdFUrJ?347F6x8)!vKDr$1ya z2Us>yGVqm`DEDrLOCYMDPuhuLx$5&$j?q;f6@dB8ot{HUF@(}F8S-rrgscD|z=C;z z@(a(w%O-0=eE#z8Ilp*Se}n`Nh;B?SISAVWMua>nn77zbyw2R*bkon%M44}?%%ntZ zhSiDbU`!D&edMW|Bf-QRrJB9ot0{vb(xv~#Is3nN0T7L#sUo#OM-v6m|9EXMUhOgE)^8Gdc3gUXa@MX(VzV@E4FF1q!w6Z1N z(Q9|8x!-`yfxBfOZQ+BlYX#)zMfI*M9Qd{ES)ZO4*@wM+N1!CMI~9Ht-AOF7>$f%C zXLLb7-FY+2?(5Vy{YJB@W|yQwTE{uHuZ|M0zr%^cuMXGHo+oDS_l0txN7llqGTKpl z8Hgvd4ZHS)mw?Jl{>3s^Q#Q|WP1;$B@h9`$#gF1!M+yn6bz_Mr)d2>Dgloq0!C=Jh zt*f_pF-q{D{>C=(@sB;+FVa;xmBSysO^yK~rK+``fbkB&k*D@W?!_I)#bK3}Viy%} zwoVkBI%l|91zw82_`5s;hqwNNZSn_z0sjhMxl>5gT2|rkYV*Ws{zT~WXITFaZBG$= zwPJ(YpZbwlheq4liu+<%+I46GK zNMijNg~swYG4i<6nkQin?!-&&4CASfMYfxC@_o*UZjgGj9SX01G)FG@ZYhUIz{5zL zF36EzGm4zV6YWYj8z)Uo7t4SYo|#)IazG(z;9p6mI28L<01l>~VhE`H>_lLVm(&;Qp$>^zgpQSveDvUm-bgo)CI_3ru}+#kdE-Itg9uZ4!Yx z^ot*WSB3(dd&T*-Eba=)m)1^ohSxmx)mg2J-TheR+-`>EUgvx7)bzxYrnAGj>-#5d z`)qdo*`-=J-t8XtXW?P@0pI_ep8iv=^_gdIuYyv_O98_ilXs0Xe7Eg8ni7st7S+`y z8HVb;wgJlAw`sk;(GRKwCbU>FWAK&Ev_hvtZ=e@_%G?Q{a;3+f$|VW$GIFd;_Ew zM=Q{_BRJ~J#A+P%4KWm@;K0m)&@Fd<(8-4C7;U3)J$#W%-DnT?8D}*Z;j_T5ySm!kDl_TDzYLw)#-h*tNmX|vrH#0p< zP0fZNPPlJ$m8haAUxil|k_kmbOoM2=en-$kk}8<6!Hqm@8m3^^A8Gh)y{0>r!Z9$2 zPFCBl$!LOK2M(WcUHtT<>e|5`%yxM}ZMNb}5aW=zzTYE2gr2?I7Zl{iK2sf|`gz~> zgSl$`fe@>U1N#2yd{usa#sO__{Us3NeO}jYf!w-t>`oZ`ARc1|ACRI{dOaxg+KlY8 zb$kavB%KYMo+dc@x;#26|MTp!zuJDdEU~BapH>@B`l~JbdIGt7_wmfv5#;?JYj*gA zo^HTkG^1bOLu|Hj1uuM-(@r!)ID&#)?HoKky=4uLCkCf9L_b}ZoWRxi3;1b);V19$ zgT{j6NOTKgA0}cDbEm|-3hLgC94nzaY<_GlHg^SH{#>HST>7BY3+K?kcRVIgwU<0L zpAl&vaWzqWVrRWBmYd}4;a*z!T3xNZ?NY7S!aq3If0%m{8=;|TzjlHMmkNu=E@_8W zPgcogmDZUe3t1zYt|#NVylxznvXfwQ=|`8z1~Zzf`FY4GxyPq5f*yE-yqad73D zvK4c%RHrue_dS*1fvB(fprxw*tGDD)$*NK{B^B@4KQeOg*gd*qNRJnk0BnTPU{t0$ z`7pA`)ueiL*--)Wx`a~-EmOgdAB4dD)z9Ul)!z1YDxIgl?8L)B(9g~kEQAu(8egEB zdcNeuOzoAN@9^e#J{WS7&ie$}F5J=*8K1_SPJmwj37hWzwtwp;<6gn$9ZC8(*l-~_ zRtxzy`Za{10cPx!9nzlVZ=`p=V559{FC-jqg78y8<4~fQ6}>v#DxBq7Ymoajj7mn) zj9c}^an7ru9~Mhs%?~7yTsO8EZ!1-_Kre$sIEJqT>aYCXLP2fH)%JZ|qf);od$jIR z_x5VVM)uG0<69E~o!r|O0_U&VH5`vy3z8R1p?-s&nbY+=bZ>~{NJ@hThX}ts3bnN| znfSvHSO4IL;RUZo@pHl>T#g=vmRD$~- zU7^PL4z9K8WwwGh^~uywp!?%HInW3sIfrZ@L+K>FzcITpeDtjo<6tb;{1M9RxKQmBhJAs6+ioP|{6 zgGL#Gl>_L9%yH=)B4XG(3nzTr$f+E&-D*mgxW49uxzo&6_nXICrzvry+FTLMXPt0q z*DczTmM?eCYPBzNwJ$L{)(%{51o2{$@hIaq8@y}g- z^LmQGYGIARCcbX{jC^FYj8D@h#(o7rMS5z=LRrx#%4oZ_Wbc8%^C{SO7&ZW!rI$aP zpbnZ%5sz4o1EuqR+g}LlhRqedLZ_~)CcB1?uZP9^tG|B1b#@y1BFZ(Rkozef)O%Xy zxgbeB&f(j%64>`ZyZi!cSDK9IlAG@M^%_qSL~(Sf$#5&ffvTDR2=4tmg6>~5QAjdO zUiqplY)~-L_Z$rbXzzSO;tm}LAe9Fb35q4+#9UQ%TV{P-L-z!;7?MAYH;=Fq0?`Y_ zOx9Po20QHk`%Sxdg?TruU`_tr?#Y(`X{Q1V#FFxi{0e%wMyYIRn6!OAVv2sudqhf^ zCF5vv(VKY@wl@#bVd6p>bQgA&Or&f;1B3Jy8u-eD z!*K~VLR1^|THA7X|u}NFqEtrEqMBIplY=~T1Wjc0+Km4pJr>@sQ z^Fn_1f(~LT(aZR`NDX7>8RBA>vSMN=`Sxoa;inV=d;Q!@B7f>0nnP5Y8bDzZqj+=) zNKpe-?9KRUtkYFkxrv6A55$MLF+xK*oioKJtuPAhGu5)ZukIh1IE6bKdWIh~%Kk)} z`Q$t#*)``2iTDjy{~xBFw*r+sC)67bjyw{$j&9Op(2hNvRcUP(9FfRmQBDq3!)7(} 
[base85-encoded binary patch data omitted; not human-readable]
zW(WoR1zzsJ3YI#*F~ipQeCu%CPlRm4Chl>P`~IhWeE5BnNu;yHE4mI~?CZqrAOHi5 z8HTR%b@Rk5T_(=d#QIwk>0>d7A2U>B2V?&r^Aq$b-imhl;<5w`uoOYM!7n96XKrpD z{#RWu$v7mRwoi2T*xG%+G`g>>9X=@uBikGu@CnqP>eI}-+zypuy;k%C_ygy)_!-s&g9nP(H99a~7@klSY0wFFo}=As zOTg!AqeNCUTZohvnTXvv27YDS|J#1rjUN%TBTEm09rO(i8RM4jMB{?jplSexEg*Om zLs>Mw`!2sU0$u0%R8#7Z1X@Z?)vbb+t=}34R_kdm!gEBD}{n<*n%IdSw9* z41kTcL#dB7$IQ^&ZC(P!aMDKjMyL`H%8~f^oqe@@-8d3F$|O(qHXI2s_R}|xvb3~> z1O`f=2|*jx52thN^^Z%x3LyFsY%tnB>hTefe+S+MyOKW^;P*kz`qS7?t)yi;<5I1g zIN%ZQvZwmawI55AOG%x&TljZirtVL4``^O!?kmr_;D=XZw{KH1OvQ%OcDZK3eFM!0 zgd{U>6E%<>$j!X4ZqPyzgM5VT*kLm2AGOmTo3U2)$QtZuL7+>qChSf-QvL5as*1 z^@QTXCv$V9rzd1BT}7p>v=+%Mx(w@nqtHG!T~}@?bdNPQ>yNT8s)}T^tX$ksWh<^c zoY;Gc{t&IwP012XNT40+>_7P7u+|RKobjOxWD5`oQIy1XDy~Sh>N5S7OmJRn%9%$d z$gPijUbXOphL(aY^;g84FK4CpF6iixLhN)BOy&qY5EPdj{S$OU5>Q$P~=%pOdRd~^X#r(`|m3L zzF@en>5>00mo`$EfG~1@>~t^v0%6%>yCynN36ub-F?QDXth;;pgH)${B2?`Jb<+&3 zp%_3qn~;QhPaPx=>FyUng6gHKW6lgdC^=JPwZ7IqIIK_*N+fAw5)Tk`%JP~XZx>*NN<#+6hL=*$m`Bq90-1~vVEj}Nso zYhbY=v_gs6*549j>IYln+34E#4}T#@Asgz|dO)k2Q%c~nVD3rCiH z@ChV>6EY8+etvqPZ)DV4WmP>4cP~hepvE8v{T4e2h=zxUqpE9Ks6Yxh2}u-BpFM*> zYZ`>p4~aYpw{L5K>lAng-Ug37aOR_i#Ed;qLacx`TR(F0?H<{^ldsyQ;y+FCNjulG zq+4kb9-o|e&-mDN`Al+jC*Hj|+NfOOdD!h`LX-&INPO`$<`jLvg(PY= zhN(!<0DXz}kiVN{IMaIxODo-Zea2YIn>n2JECe>D!szGvkw+HfDaY@;H)fhZ>GP`} z#A?y+u4p6T0wDem&syw2_J%XG7u%2xDFWAb2?WOWBHuPxYE*7aLoqg(r}hw8|L}@y z+x~DolJ{g(-$t?nLziXj=2Q|ukoSY%6f!JN7G)hp+&?%2Ai^}W4fdic|vnlFNUD*{qj!LNx7 z;z8EWvox6EE8mgBEDL$_;E#G4?2$`qpBMw82aP=6O~}RsY0NnpC@hczw7s1NNg@Mp zZ*V}L{}r+f|IOUq{tisSkdp*>6a8SQ5C;y9BUaVchzPI*Dni7&gB6TLj^5P+g0d}Gh!k&36^)eZDPl zq#$^-7b4FX-p$2tWC{ujFm~z$*k~Edy?_0FdkX9~DSewb!3^7Vq{LtkCaHSR!OH|K zo6&G8CLe){zN=6>-UD3wr3*iHd`xKnvua@&D_M_HgvkOf*}3$xo`l73Yb(W#JeI;s zu zjatq0YaE$A=x%g0=+6w-9k6Eld`w$mrJ$*KX1?Zb5>vfxk@@~_77fM`jXTVv*H5~P zy!fr<@6Z_&cIxopGvFYgzLf!^ETh`yMJkBUsU3zi@iaI=ZD=oP9BpbJAio4lAOmt9 z5yN~<=#c(fWAF|@ZY95W905NdlL1I-h%l$|>cj{PH{vcsdYLPPqKJYT0f`Sm1|E>! zj?`fFkRN>jrclKd75YARRGH@1e5fD6uSHi7l#Jd~#f9$ESZ23__yW_8hgUtkS%0vl8s)HHIpE{ACR zI6Tz3+kP8Mj!sxNYdE{9AL|vL8*Yt9VXc{(t<~FoB!VPZBfV!wyN}PG-5az57lIN4 z9bN^-aZ8P%jqig;7B#6)gUi~FKwG4AeU%x`h-*KdH1R{!A>r2d%_p)@fP?mzf{zrg zMzE^YGBTn;STtE#@kpmeOG{eL`xxML@@>jNJX$z z@=o|%w9e-TF$G#{cPO}9`E+w%uU zkgA^EV!b;!pa-Y1b+w0OwGT$IUpej-z9og>CsvB0;ib8hXc&cii=wa^xwx#GP%wl~ z9cS_R3DYFqHLciV4CYZ7>3V$r39``eHW7$L?AfkyYsO=fY*iEDC1#1*V~*+QL1?u>Pq)1x??>(9dn4J$+8-?fnL@aolS zTl1wZa%xc(6%|iNu4C5Kg8ICs?T_sd7$Kd7#sSWtGoZ61LV;o%=E_=V#MAuBy9c^R zT9oR)qb;xM0?6G$WNr)$@Zyp15>Uyu>^ejqzo<3f%uF!<{!Mg_FwT_l)yvZ|jGiLe zvJK_|(44<;+W7iF;p!&bkUbl_`8$n|Zr_2H0L7D1KIh|0z(AD5eRyO?;8XGnidh)| zh{4K;O4q!(m92Db_SH8v;5ccD0S1gO5^boiPP(PaLlra>+Pd@64zcS#!{jB8fX3G{ zA3jV%oHP-quKgUi*5PwI_;1d4ZGnX3@LcFtc}HjLKbJ<^Yh7e)1z=X&4i^mUHIm2H z)tk=A2?>ZY8_BB#%Vm0NqMP8Nswvh7;ZyYy$G2MC-m=0R4tfTJw+MtB9dHixZC)TZ z%8?}l-L%*9yC>zL$c7#u1cK=x1x9a3P#R4pd8;Kjz@vhGr`O{yW}gkGQ@XRT<)}xq zB`<7gvZOhCzHSkB+e{-ZAem4+Ba6~iON-p;=|A9{hdT_Am<4dQ*@NCC8XzKz=h<=? 
zF4FP_NN9KaEn3P9v40%{AkVlqdwl}&zTSfzNtkoO5F3ca2$UlzER5JFZ5M%gCUh&} zBn&7k3ZO{RL5%`SakTMquEFrf8@zHHsCeeq_^Oc21Ky%j`b&m<;vDzbR{rwyoK-z* z?k(*~X6hU_(jJd4xD%`uI&R2HzOyUK{$(RP@}4?u51`XSDTLKHoEq{!+PkKp?|R57 z>xyP%V)_L%aNbkxdq}z%6dIeaIQ{1T9e4bThdjrO^-2iH0neeO*CP2PVALqDtjyup zzAt5c2x7r+>cb}BAh7@~#{EDL#?fX6PRo#p8| z+TS{4(S%j#0u2Llb_2T$$hzgQ*1=IpI+_;nq~Jg(5qm(myHXC*>u@!0U;sb&olQgt z*05;6Y5`7VXeLwL-ey(`auG1A0WStmq^f71A#t|tU}FNX2yC>F31ZI%j001XZC(bi zcSDeKvIfK?4!nMSrdERQc>USNP%wyaVbXm%9Na9BP7MUYG932za&q8Sb$J~{M6wC3 zz!I>ybzs**h6s#?kby!AcqAkD@_k`rMBfY%N&qGE&V$mv6p4g`gQgI?4 zCRhxh(mwBLv-SJ8|JMu~XSgpDl9IH6?+om6ZQ{hwmMOOaPku$2Um(@KR!T0x5oV)W zt0T3{oKSSuty6EQw3J1EfsO$&l?BJe5ZHPCU^nIr zP2J>+K#>ywsD#7MmqA{MDCdw037FN*3vry%USp#0i1=X4nr;E(!g;&71J=u->k80| zFhLxvAU{9u$@Y%bjq(od<<=&djDVY{OXSP}eLW%AM-wX0xkC%{ znK_TCe*D~%R~X~nxadaGH(y>Z=B$bnMovuy@<);JkN);5sOl);4$!Twz4+_gsWt?x z280ICaShmZhy)hNad(9(3_*czr=-LK>j7rNCb>#@NDaRfzB7)LNpQ&{BT%5|Y{H&( zfp8ooGYLxcSLaRDV!@nVKvtF!t`C4SkaATY>X>1WT7kOzcQNlV8j-jaXvH7{7U<-6 z&((HUkV%ZuBs{k3{ge?GdSm6t@*r)#`{2gf=7NB<>#~{m-1C!P!j&VAl>P!PJ};uN z|N5eRJcJd2i6??&A{TpZZf@O#XzmcCc3cWsh=nyRprDWeJPQG9{RW)JUa0k#|B!CnF-qWC3h2ENmO9=z_9QaxwVPgQ%f>K-)-g^Xm zfcuM!&T^W_q5ZuZmfNp&zerB#KDv=XdFrV}t7`QwWSb*F*f3|XAW7XS6z_KbeON>i zP)5=Xoi6wY2q2%s*wnNishXiCv4y-E#DRoCZ+{ndNVVFb;zL@{Q9z6L%A$-&6vb7xXiJo7XK2W_27u2heQ{ZAW$)YWm928gXJSi zr(P-$dYf;&h0GtY1;&6N1-64_MQ&+%`BGJNjYXFav}WYA93{|hd~HvL2ESRMF#Hhoq(A(H}54c5nvHs3SDo+#mZ}F@IrHkB{%}D zWz~H>SOtp966=r9vRZO8e7;d;nqTwx;r<|-Lb@L^3Ziy`uMPqH1<8g?Q9ls<@^-J| zUpNJmwU{(wF) z3>r;M_Le>FpsAHNV$S`I+1TL^{UDYEjF>x1HhWxPI(1#MI9en1eYBz>I-&dg9j_S- zPE+Wg)s&kipGeikKRE$Z6SL(wC99y3Q~zWwNPiB6R|4Mr(65~`e~1D|2GFf#f$*oW zy87*K7Vdaf^y=HE8G3)2eIGvi>eBWiFTmV5X{JK{Wf!%LSC@)QnR5p1vDqogF8?c? z{(UxsmZ*10tcll3f1dczHJgWG{i;bVvZ-}OLx{`GZ`hsE#%V1039rt^hPOk7psp#^ zE(m9lgBEU%0l+%^q10Fa=Q^m0ZD^0OYh_LfUQ9Rpqwz;BLZx_)v-0Y0PV+wio%aJa-vl3go1aOUwNu}Bc{U~OVI~8Nkq>4LyuXu?gAp8M7GcR8;68py zNJx;I&O5g8y~FsuHGz)n_mwg0zQ|X(L^5z%I26%>P8Vi#8{qPRAnvr3kap7?DGFaR=ZcZKo+K*K8WM)T*l>-|I8K5zYY6LE;~ zFEEPy+uDw+n4(qFPg9^Hy>lPc-UYbO~Y%7GDp^sEYZMSfycA;7R6q{W4XOBa72|JK6berMj@rg=e=qyk5L(YvW8;qB9=>7RT zv1D`hvlAKbqRc+r5EHQ!d-t^d@F%3Jdycra?*5F_jz|HXBA~QCsEM!+r~ep@mh8NR zF~y@NY`P#pkJx=Vhd{wj6!q%+Uzm>8+%inohjlnc=2mZi*C{mL^GO5A%pq zFII=ax6Yc3X?4HO$vrdi%f;xD``ALFz8uj)J4se3mtoWe(FqRBKbb=FCV2rEL=b)O zXcCvpFlvE1=>r@e&=r_GPT@EpqEEQV>+*mKk{LIJo=5(<@ps>GE@m!Hqs+*7+D^E| z468#us-q}TJTJu@llsc2^oG`YVAv+bBf}S~5~~>Wn6+f_u}6O)=Kd$Ho{T^9%k2X6 zm(aBixZaCG3dMfXds6gO`LBl?kA6Z!fZ&9HRRGI_7$|cvf`++V#?ve@i>9U~7l0<9 znZiRHED`#AY4FS0s%7Eoqx$t6MzFtE2_aj9%M?akJFVc)4$ifJnGS#cOb5U|06NG7 zIZGH4a=mqOlNFMnHq~WuC3*Qkhb@SB~7vF zTvGfqVfo54g)lMEiintOl&g^2i522Dl9K=`#>eWlFHm71aqZ-Q@b>_kSV)!i`g!Q? zccoj~Xr%Uqs9g%UUCr#G+6S{t9Ij}7iOsf;fp8sK;4XD>-}r=o_waOaNeL+ojb4E# z0#M+;4(-$rzfWm>=*w`2}i_pTcg3DCw-d;#uW;B2v3-GlAZ{NmD{LqDoBGd

mdZH8V53wmi&-%m?5vMDYToM(dhOw?Qm@ zDe#>F(vv|^EPeC0EBH3DB8S>a*~3jJ=NF-?N4`EdSP>~x5o`lc=(9K+F<>I}@jVQg zg%xZzTnEZpwdO99;`*YoK_`DRRAoq_}wWxR)*r zdcm*+%HJ3;65$8CW+}Vp$#v_1d)NEUDl3Sd10)L&uX<=@l6f_F5LyjblOxl=>8rJf z%mJp9;9DyA`Q~?Sq@Dxx^FM7O3=ELkZnF8B00Kh7CuM?qdk;c8qyxU_!#H&wdXiqS z`6R$xg#=ae@~5)WqxYN4@53viJWe<(Ils*Ku=zrC1VuD(qD{usxkVa6|B^6ZZUE=RNcqeX}?QPE&@->o%Mn9TsIV-zY`ODikH z1&R-Xur=Ye`uO@H`fP}9H$b=`a1AHC2j(dl5`j=a5ezN?x-Azm$;HNy({{hUFH#BX z1K=iD4X*)Q1xso@n1M}!aT>``1I%HeCEr_nHiR(XGhU0r6YTBlTL9ZmYDizuzOgom zhYW6T*^32@;UYvAmLl~hXbc@8KN-psh;^f)U~8!bed#lmvk`lAzg?Y~=% zzeoh0c8(rI#v53EZGasy~WxOXMI_peEarIo&H<+#SF}u`~6Q} zfZ?$&fQ64_T*$$**H)4har^{6z%-m@(0)DuH-SV5d!d0$^PtWsq9DLK(rJ@yY;1}l zo&&aC8H_uS$pw7y?F^gCg@92YJ(U{t0#N)TGH-A{rGt#?&!@DSf`8ME084+-F{<%jx(Cq$U_zqUIXmSW8%k}y4M&+tE;c*7;f6eX9~xw(zq2eY+){PnoxBSSt5%#B)=_46J)NCePk7-l(0={*ch@qJbnmJfh= z4g>h~6u7J$V;2hGAvQoh>%nn^t+~0Dk0R%Pfnfmbr@`6qOdbFU=lZ%c7-cwc#%lrd z_prC;o`ub{bE_@g!f{B;tZoon8VWe>kFd>p-bPG7Tw)XbM zu-vND*}}sw)7Dj4{wlC=(-|WwwEQO*5voYwF%(SYJy@sxe>7cpJeGa?w%ztk*=40d z3dt;+NDEm>i9$wFNs{a>(I5@8L3SC5WJkj)qG2|Sl+0xHJH9>d@BQO>KTl8TzOU;$ z&huDjVWP{qh9IBM-tsbYYP|JKj>>ZHEbr5aKbdUgr?ueE%(x$`V=%r;Mr^DPM=432 zL;_Yvpda``=muO%1^@&UV{8?PU4#ZnV$B*08qBtCBJUPZrF6~}?1}X3>}VccOw|ZO zyNTJ-afA3%h`p`rc$11zUBFRjuflRh7Gs^F!H>)`eiN=$o6R)0kS4e z0qp!ic&^oO(z|qLiclClaR~s^+IU|u}`omyr9E{wX%i~dJuP<9P3t~G2#D+)NVNl=X`nqpPLR=9JFuJZ0U6quRAL2g`JUFW zh}U`0ebxP1l_M{g2VWp1B#kFc%sgq_4f6rZ^6;ty;xqB8w5vVpCsD?OrIT+~3%Ld?})9X7rYe|3z^vs-XIw4Hi{*U3o*z?pSX0eLlYXS>7hVE`XtG zFiFJ-#V<`i;@)cr#SDeR0{|CT|MxTZramW5v{O*`;_NehUY`isi)~XG4T6ttJ!=>V zUPXpR%GwlDz~mr5=nIq{_W%qMCJg)F0jwEgWb^`-s8g3V(QVxUO zfJ{LWrisJl)VmoO!Ro6xk(n7qU@g29;H#aToy8dKC~I3r#Tk9KvN9P*mpzW^?x*Jz z@q0j--UMqCTKUkwi$hxfn&R&6W2)DFcvq0{WA{&G z+|ZWRqr8g(TLWv!I?0$9e}1k0(|>ECoqy#@>Z}=1&I$^^Ux1;|zCi^xo6ic?5ubsuJFW4DPR@q)mXxb^?xA4^kXPw^Pf z$?N5pkIJB+DG}8|kpm8j*#9{ydiLNia^t;&T|&spC!a5Eo%7xx!L{L4&EeGUlScA- zhHIVp&ZQ?Lnq1ffpwr{s+ z^)}JMhiXyi@UzUzv{f`lP!-*sXUf^LAJD}rRg;-6&=y5GntN=MsQiS6Rl4&@S&~VKt;BG<@hAJ}*Uc zg3M$Xlh@mkstYa86$A?6hmdupd~3JZ(f)#id)ciA$0PFxYxZ3kUGe->#nnUiY#G*U ziwb7qoPT4MwNU)CTWB92m#@@Xn~&_%yWCv^qeY5$sd5Mh(eCWwQlZz?Jvj7iz28a$ z#G=)sGL7mRkEk06M9qm1Vd8p)42=hfAc6Zrn-%}~X_^?btgtMxmQC8?a>dv}6W%K@ z?E3op{pH{DLz?XCE+zuo4S-?{jjC@+)w1Jwjc)f$U~?oT!?3@mPb-6Em)RxrkP|HL zUR|JSh=r-f?+NDo_ItrpaMzWvA_zy29DMs^t>@DGj8{W6FUTpv)xjv=tVaDOm;TEq zNCOv~9;z<|kEav2NDdw`{Dg)wSXlM@W&BJi z3S1Q1$i}`uEF@@i@0e}C#*Y!NzMB1Q_7}}w+3de*+>Z0p-1oK4+1=ykIzJxn-vRw4 zc?fvD$;aN^Fpr<38g2M9r8ks&&zMf+T8?WWqh%XY&^6jb_5QCH-n1RPU!JNspyE;E zByV{9q_}KMUQB1p%cA=4HBHMaW$0h{#rLITmKN{S=$-VpX1cO;{m<>OBkmQR3!ndb zASZ^k6SvNzYO}S+A#JdA02CnYaR2UPsSjmXPcJ6IzN^QmX zGCu?mYVc@dmYZ3Lgv9C6)ZNtf)Z?vIqi+-+0>qAw$!yp7o@Znmsh{#e$+5e%HZbb8 zSfaiNh3)6u@)_OO>+Uk8$1bO3tA8IbbJqD%9T+z&)%i9__uYBzs{EZ<`|lK7UAt~w zb^zB}@8?x|^Pe6`jJW)g`6|>FZ^ST{_)J8f&LI21`>h5)sDhzcjaF)$)*CE6OCxjI zLNgfozHRdDWTqix8+?2>}z^iV}(YCj~#v8 zmVG4GW^Lk2iWXaK`xdvM2MQAM0hV;E~nIyC5b5q|Cq5Wsw zp!~lFc0ry8%=0aeau#?`{(XlyoBPMhKGk|gaQ#2;qT zD_hr5N8)(Y@7!CmQ@-QY_R&q^=nslpgo9oQ+@}7fj?qKQBAZp>k(;SrW`z209gbp&`T2{lo{f2`b|kjQ zeaK1^)^D9#Z2fk#$%;*{gqM{!;7*@j$9f)`P3Kiur9PAm8d8Q-LxW{lm5+aFJ2JRN zO-&G`lbW+z!GAgAiZ)b~84!Px1m;b3fKsC%WM@EvEX$K-oCpMy@2~vbTZcnK0TKd? z!Qox8?jnc_tB*x3Ut=z^I9~udY-hZ#;KWj`-LIH>r#d2%;^*(Oa;5|svCp*3uzFA< zI%_^Eoe_C?+5l$vOT=1o=xLF}FCxqa>qk_burTq3I<-vM{1q! 
z(#X%2$~7b3%VIXqbNqzPz-^5<*Y6LShF*UG#_3`oX@rmVc zU%%R8te4Z_BP2d_>)%-C$1+IQ+3P>Kg3wrVC<_UCaRlv#cxRoPtLv|Cw@wT${q8d~ zZNDXedX^+b6BQ{EuX(SIYEZ;0`TeiFMm<cy;@is}nl*$3zp zQ{*o1$HGRUZ%7aEDU<85|fmoxE{cW&T5=H#dr1OuDgX=kvA=lmE1B8t7k4at{*02LT>3)1d&N6SF0s1omMJIM>-2as@CWjK~e-}C8SLZO>qUXr?{0%xKW9dcz zs+%QvpSrttT~rr9Q)PbBV*B@7kSe8YS^10-6CEA=>g(v&lMhV4tYcUb5^8_U8+lq( z=La>PRrg$qX_Ql!6c3VMILb_FtqjY8yi!ukIaYrCuzPOIsjOcTh7D-f|MGi_{*le< z`pziM05ptL(DLxU;iWv#`t4hZ?gUjdboL0vzyCq^)TWxt%})D*yvHAZ9+I+dTWhH^ zoj`5g@I{cb(kP8dd5f5K=ig}9;t|SQaV;B9yzG&OOJqgE z+?F8Nmh>;J>cclLb>y)}V(k$O5KvY%K?V8k{J7B+hwqm#F!e--K>&S%$$=M!nI#ai z<^GiO&*-0LwC@&%V`#fv$8r!U&@eehjv+}pny#P=E@q0krR5;vm21~tw+)8u6D??v z>Gnsvl<3#PCY*`^+Ju`%Z;FaCh;hW$amH6El;u*Vj6gGsuFYuq&L{FlbS!UzUxx^K zTuWZD`C6^ErJ8B$nx=cA(R;eml6Crx%mf{`lze~2oa z(b(7+CO2&YpP~u}fB-m}pb*>N|L{J7vW#fn{QMTrjU1UQc~`WfX87j_{Yf#|TL~gt zzYi%)cO7iaU;B2@=9fZ2S5E1R z%DKUqvZdLNP!h>QQG569T@bfocm|2wW>D?3vgvh(-yd>lley>Iz*BSa5OZkr6#qL<~b_?H@d+-?{*!?@rapyP!nhgSvY=j5+`o!7V3RshK);YsGHFlD7(!)RyH zyh802j*I$?(VMq#I|bJmOB1nzwir!Lr>`Z$Y)i-W3fe?-h5;U_!22G18946tZ{zy? zbFk*phi8m*F=1)>`J8e~3kIt+d?m-!VCp0b7=@`)MB{D}U~b2C^}u~pwLozG$rOmb zb%L-4$mp+wnw#jI0Avep3WO{H4L=}@tza{vfSHNVGqG0_t^Porp%`EWRJC@|FSEsX zc*op+V~`B23B-p0)|#XbV{J0EZJgW!WVh$ZcIeR`Hi{^A97($-fu#&sj!B#W)h_rP zLsQesGVEucF~rDF($m9RXfF?}7w{8^W)OPqO)+l0M$f2EBTt>yPZAJrXImrG&-<*^ z#;H$TG#3Foz!HFqTNgQTK#4%Kr@+s2O;?gMPZ^n)@~_T;uJ~TBy}tnkrVu2QW)|-O zssjTG8=L#HMuau;xZwxwQcDZ!2JVbtH4YZ1>AKN$8FBsYM2_+)*YBYwIz6^-hAXXk zCB+P8#;hM1tIH{8<%CAoUd%mnnQ2DMCu~}-?%{hewW&;45YkcZBfJ@67TDx!31S3v zD2KMTgIru`BN+BpGBO{}@qL*w|@q2fIc4Ck$bPt`hZJG}f0(8XoMRSH(%IYFn zmVw!=0Zd|U)_^;6A9HI&kj}3)oR9Th-i}$|9`@NUTDpfZIh0|e-L%qBO&&URX!|k5 z4+P*Pl>HAZwt!BAbSHGBUQTv)aNU45CbBghK~D`ugmtsXf}bXl;b2+fK6Pc9w_UiQ zskoP2M|Q}!*7sYIT~ZSyrm@{v?;~|+w#Ve^`k~-Rz~IQ!N(b=>5Ks@u0l8hE#T3KDCje4pyfm%0N_R`@ zb$ntd6p6(Hc{mn(Wj-lx!4VDhemZJkXrW2Urmk-A{IbS%&DEFM=&z(IDl2`vG|J9p zcl=xL!BA@yQK5gI&ToE~d#LC06HL7&{k#62JuT<1)^8@kJ2$*rUXOT@6EJD(*-a$C z7El~9*s5X*CMYqJLx(QS&d$bvRKe*Lt8xfZicQZU8Glw7))9HA9f;G9$9>N+z7pJz zjzXC&))g}=Pp&~fXJ3@fB~n_pz8)9G*D#1ax&8?1kc)7;nY0&nQ- zfwDX;O{b#w=B~ugCrVVM;2oi^0!I`K`LnIxM1#HH-8tt_sszO>d^22XP;fu!JN{?# zYm&x!WhgAcs-mWC`4d>sdm)J;Y{A^{)l$lPYpj=2eIM;pPjN2gEj?UoE^x|l)Wz^} z+n3S{MYd@v$TM(J4~HzeyRciaTTRH?Ez(?2`IT7eReEX>@1`zr(>f!35anzSdQ253P zw=}fgD5)JFO} zNB$%f_*}@xg?kAPtn@?NDi1C8Ye3`xU>$C`ttNBbzbq`(Os0?*y9Rs!85aS&B2E6& zN#ioxd^zb`26gnT;)#QFTI;TCXMCW0Qp={ki6uWu@UUk65nk@veFskMvdLCv5Vb)@ zV%?My=NnnR7#nfbeWLKGM_#;GUw-C0jtldCZiGi6Es9-mGs87A20Gf3WCP{r_qS7m zI$ep{9`hp4f8}>SobqEm@-(4^BpN?ZX5p{os%-C#Yw&B{Uvc|$`C!z~BEOm1qyb~M z28Y8Tqm)m_DA^xP)YohiNC*neIwE*`px=fOS`%JX>hh;D<2|06qJ$>Qg8nz%@bBL>$1>6T!%A&4_7I+-+zGb&#ca55nWV5x;r(fn7B zuof~*h=~G4CY>$?X@?wW-(Xq6!=$u!@Q=I-JyF2Zc!^zy;<(X&3q`F{S^dYm1G$!~ z)cgeCJ(AJvE6Hh`A{ng(0ZVQWcJ<`?{wt1mUEPSL9d}dGwsV`&spZ4{gDZI@h*84H z!2Llk-EiBdQupBF@PV_!^Gnm^qDMX$GK1S|03tNVd&nqScV%JCrRleaf-RYz5JG*EoLiu zjN~J>NbL3szg?0LtW-M^Mj3j+kr`t_<>A>vR4q`B*9jKUDabfoe-(zRkDlfoOk)^@ zgW+QLtTew({Q)(=e;864&x#%&h-^vt4Y0~-p|YuEbyQaP>`<&8#tx%P)5_G`el%^v z=It(>(IWTrI8NpT`A1}zwq!@ATfB^o=ssuDD;7z^k$#$Ec>hD6X6}h@cmI%ViR7o7 zeTUB_q~x>j^t`?891nC2kO=*JSkCv-`b=3_nE=RgT^$`-Y~N1rH@%iznAQxZR0KdW z)SGap3Zdsl%M;EiKV7_6mjt_NQ)+Pa=}>}ReXS*fegK4`fgR1YR64(4JMCk~U^h^# z6UeWC@lC+WXiv%AC*}r)1F47MAjC}E zf8jw&N+Z^V-r(X5V)(?Rr62Ug#6YqFF$_GAyLRo0bA7xWM*tbgw0f&|wQEUs$5ZsG z<+6uPd^|5O_p-Is(BEGZa4UWu#J6Bkm{--Ql!f z`<&RpOT#{4UM0LcZw*oLhU*_#S#+s14)giv$L;i}GFwTmqcT_mT2-m2&q$^u7~X;q znLi>4*RSU!T!s)QVRZj&NbkHr&TYbmp}~UyOZ6ljYCq8HXnLIU*1l?L44vvwz22TT zLYJ&rnH!%U#Wy+nW&3o+^Nr@4yW1jSeSKt0!=pOcj2T?q5)!+zPU-3oDW1qu?z7CcyrF8MLk?T! 
z=+NTVZr$1Y9Jyj`@8~=8K?uOo^()41kq%d!lZpMmu`ziC&mU=CT#6{k!~8lLc-@jV zc@|mAViRx2|EfYxKHJvUXDQ5=bpet9*YDL?rgu6nZVq?`N?sLq!PK}0r#Nxr2crVo3DWB*KmiIgMTx==kQ&s z_Tny1OB6lo+-17jd}zP3ZlIiQ)BgP(H|(abZ@R`Yk3a(01F8o+55K%7J-R$M1tpXL z>a9;6XH$b#ur1??@|^s#1M=#ejiE~yp5tAhtF3I*tH6ktBUyrd~doY-f3{fQWQ?Bv0n#>TaepUrGs>}S?e{qAe^ zzmQnxU5BdC`C;;b_>7dO{JF|r#Z3jT*zD{=b}CEt;xA21!%M?e@lYmksedt zxIApq)r1K}AeSJvA20e)%&sW{D?0YJjHK8b-*BMDtxU13YhNxaaUg zYn;v8&=NuBT-evRZzUsf+hS;1OXTynaB}pNpMh`%io&hZ93S7!>=(cmq5 zyL}+lo(oWdV9AzcKTKK zZg-LTrTKipAYif1;UOg?*Lm>B>%Ol8-gVy}>=Sfu+g$9cCK~xFMG{-+i_k|T;K8a zqmMttQ`IuxzR&z~#nY|nhBYJ;+I&2$Uq+S;PkX%cb$n>8&C7_YV9u{=)6-P&t9jEy{JaLV7Aw#Eq+eB%-=K@er~1Rw@;Et5YoEjEIpVB}v-(++ zk#(~geKAGcZK)hpWY$}6<#~mZgY_aWl5y`9e?sE14nU0v$2zaOiJSLfDk}*440@&&|!n$J}dw zlEO7~{F*dZ!TwrC|3`_;jrDa(@!DhY5%Sk-+|Fbzd|CFm%OMfaQmA8?!N^oREBA~y zC)4eeCW9t0PDlobW`vOE?d{xrw!vxi;BQ}y;IG2h?YlUFCJ`uw7S(iR6ZFjt&Ch*3JMhX8zwUonO-uja^7cEi;%}%GUhaao z!!3-)#g8S1lr0BGC=Pu6{m0MxvBzo>X^ttqaN$K9y+0UtMsrTUZNEZ_H0v3S1~Yz+ z?Y)b#k@s`b`4_KNhWul5@(yU#U0VGS2&>6vA9>(rz)NO^On1@H1k&u#*D>!fro3fx zaXV3$nVMgN!Coq&Ap|i#fQ<|{_@>MLvcFj#m&kVqum+3gBrhrEj!1p zaB=B3WW1>M0LF&U=;w}%?0;PBy!-q`O6$31ZUgzaaSgGN962jQO=M?}Uesy&)_e;s zGW-Ij2M&DPV5gp19noG99&iv)AL{>AvKz>UEj;_%14cw}CgB=;AQ2;OXkwG*#|HpX zW=?go-1fhDhR!shb~N3(ynATkkN-w*k@o1(HemR#eV`v;n#Q?OtbxmRkL~96b{tVr(51jCNKVzuDSi#80J}4z6HTUMi zE|03s+xR8^8m!W?8ZnNV&{o$fNayN(@2M8n^)ZjGCF#qTAp0{}w(00caBv1953}(* zZ+!7L{*}?X?Vayg;u`Omy&QdJXLbc1!g+D0)P2i4sR2@bK11jvQ#(i1`{wRQK=QK-;0L^6rh-7C5M9swEW& zxv5{NBb%lFA~%A;+q=63_j%FZ7%JFDBYhaPv zM7W@1oLHP{B@Q`~a_5dwv4;v|y3uc;5GH*EQ8gmk22P>OOPe1TJpaI>%jkE_jHpte z7E;>wx2R#)ic5}C9SZX|)87Sei`6w(sS}T65Grvv+41ygR@y{RN5A=Z&GxC|6KRa0 zR^sVzBq`i?^Zz8u-g&pSw^4xiC$p$uAYk`Fz~6?xzU67Fe*P0Eo&*|TEg>wdI1UUV_kySgcKQH`0ecqph*(2OJINDNP_ z__b~^(}po<0#W3Rc}I+@oEiX6ilD9Qub?2Lt;p`FZB6hRziJ(66Dtu~z0Q1}lChjF zO-=)A#9J0Ollm5ik}QX$N3Ip?5R2RNegDJ5S*uPkY`K^%vTOZwARLGyA`LVN2q6;X zj^DWe7BTR-@pBEVMTUqhF@={7f*qhst&~8bI@@%8gL_6KH5dm>{EtwLD96e1ubwpl zS--{3&(0kQb#ptdx{65MA5K^PCNtUrx5A=%x&GsNmM2D@uIAhFYcNGwY^nACjkJFnO55mxrnp`Y^wt*Yw* z(-^&6K&*EbP6N2Qx^Az|$&)PFfn{Y3fLT~ODsvz43-d?3Km9GEGLM5V$HL@Y;>1Zm zKf3&g8_y1?Kvo=p1|DuhX)K3jm%3NX$G{*ud0$W_otk z8xcdYR1i4)Wi1!l{U zHK8O*s8#FlHqf^bWF$EvG_k`U0sHku=FkzS!8)+m;a$PusEu=|#3qkeKAP~V@;4dT zc}3YBl6)_L$UuTk@4ltn?(+8jWq=*_I+y~551N&qPn_NxrKQDp(d$~z2?-NidDKgt zOe3nT-X%s2gf|iwzv=${JLC}X_U7s=Gk2y>Sk)+oxT6hU`98vbSf%GaAgoJKQSW9 z2-pWD08s&*2G7SH(Dz1AU(*Ez3cZGu5vvtBrcpA{!6Z$I$*FsmI4;mYLpQ%IDmL~< zW+JPp^H%GUowv6IduEo<*xoJg3_igYDC+8KB!$T7^jXcoRiJJ}@caQA}c(zrX0Tc(pKLBl!nZl3@?a!h~e(>$m z|K=S%tL}f9rSW|HHwh2J??ne`!p~x0N=z!yHo`JahKZ4EI0h{}WS!i*cI9;Y0e7#9 z$Yck95d(+Vs%<{h9IJwL-WACtJ_x@;)g`_HfLtd5XN>h#rIMVrs`<_WPn?U!(!?V| zQp@0Ag)!{k_&wNKVliD3-_n`{DB{aJ$15v5J+udmr9BY!@R;*Whc6p#-1#W`G=)kl z^S~maNn#(Th5Tx&v3|*!5>|ZP9LMS7J6+IrJlJn0Bs(}@TO3UReQifg?6jKCL!Cn^bw*k7$KS4A zG08d{pSF=#pF_#yxSxAM4QrwplT&M=zi?%pnGSQ<_ezJ|&UE_(28@JiOY9pO8;{@$ z`1Qvxd$0^Fn}Gr2amZtAY;Jx<1$bAka+>GRxv#uLN=Hr&NLo>yp3LvPH5LbbEYjr& zc>U(b$H$*DwU(seG}oV6yqn>2Ea)_0(QZ(@4|0;fYCmaFV>SN2<&LR&0)Rr4vtPv4sZh@ zYNp}SlhY63dtYr(@&8LSBM@`wGr+(?iKPve3kW}Qru0GQ#lQTj0WMR4cNo#do4x3c;S1{@GQIiOJcLn0ReQwS?f z#o6)ar&~25uoe+ZOz3cA>Xa=`zS5zHM`!dShbohX3Wfw`>P2Xg8t(46${up1_vo5f zDYqLx1M_!>79=qQJTRML`j}BH7IQ?2jz5Xk*L&mc=z~JW5#7;iEe=yg@&io@1-YNm zy)BZC4h9epT7X<2VM7OoK(d-3vGGoJfj1pVQusP9uCCeXu9Ht#yFoi(k4F<0 z6aH~zVAan}d?tERGGYM)1zoS*!t-ihN+*)oE7mxfAzhmrH+uDOw*=oOs2srk|(hWi7 zg7K;%kRR+**ySEDikc}I99BfKFHilB@|c(na5zYx}?$jHdZLuP>X z$Npz@$s$HPfR#Ah6faJ?d{oF6woSL`;hL8u3Yrl>!bZ3UvCW2)&-U&>KV?YBF<-2@ z0f^t%kc%28&Qnmnk&y9%DZdMpjbk83pC@z7!|pXcTOe&uXDNuw0p!r=ViMVh{9~n7 
z|KTDFLP;4?V5dO?HxQE;I2BKX;$Ma{1oY5qoF7EkgQAASG2)6K7Y-EO#JaM7|9iC+ zU$2BLkyAQ)Lw;c6XK_?88SW<$&2+`14o@TuOZmT$Ly|T8pGTJPrla4#qaBRF%7jje z`}!+`%3c_e$$b>=X{CxmAvHU z#FJEQ%0Pxj$x{09hw7rCW%v;m?cOY6()YgA^Mfbj~e4d8=-Pw zhfx~V2UN6#oCjC|yg|&m;?om4v9YS$=5=e{p*O1E+&HkRa5!qy@~gO>?bC>{OLKH31R)kN9- z!346^Zt!?aq8o`V3ve;$3>?3sajSSeWM?G-@OZ zwAF<%^hWDY-SNjg;azqC2M^Zcr{a7pL~5%I$SC6c0jW)7G*1u7@olsTCC}O6VH<8g zht6yZ((RCR0&K8at}wWHaQoWw8hNR%A|YYus`N%}L)EP=Xj(6FXi-<$u*U~cOm|4i zwhib+7W)*r?o>HU+ZEm5Sh^PHTgky&yVk$iFJKY&Uuf6;J;}zXY)oL%!XBAXg>8sh z3Olgr1$XRVc(aGEo6#bKjz};jAZ?LAr*zGX7=0$cydZZbM3u}m>hQSc+CCwCA+act zv_6EvAmlU#21zOj_DEf?yk!iDp*L=j2@SU~4ie7pjR3nRAVs@%A7RGXJ;v}WhSvo; zTAJ<16rZ^N+S1XyGA(#KT*yJ?%@BnvgGL~`)NT2J^2V3PLz&cXZ|~d7{LUzAin6zA zE3}>o7X#ZlU)7v)&Xh`A@3!hfyMJ@l_5B;f?yUr%z(lhRdTo-SgA1L6i-0=EwuC_F z2zW&D>oCiX0}2RP{<2<+v>hmxiA$X9DVkrE;}^KVp#zAh1*{Ehyp4(`S_;p|6$g9);Y&_OAs0yLX`y9u47wc8YBu!3kRl7Ka8KGr8!`Zq(#(H z6l3Wn=oE|qcR-&>M)(>*2|<(?Ug{H&m=ozC;+2Tb7FB=(@`;F|6ZZ$v)B(GShp3a} zw@3mc!210Xp|vQEKYbYnRiH-{NI}t1a$7rI>6&zd5(?haRKGqii4CQCO{BH|!3b4KE3jKXUQt z(4!y0Tw73j0ty>O8}lpg#054?s)6liEue1fFKjMSC!qh1!xeqh$!QR0wFIgS5>rG( zhBzy6upr2k9RXEw+a@V2bo(dCVCNDh;jGJ{jRy<1N?7eNYJF|4GqghHRQZwbojpq48=pO@{lHd3 z%fmD2TXcTVyP~c)1_&2!CxpD@bcU14k8~$GIyy$$@2@VKm7Q;0VC#FNFCB*MimQVx z6gZMevJB=&A>#R{v-9_A+BHi=)r#cJ>o+`(!L82rPM025)ln$xBdgH1GDP z+5Eo=?0@4Y?s=}cFK+zc1K$)HVE%uR8?!6du@BGqcD~Wr>-?%ziPiW$-8#kVO+g!0 zNPSMoc|Vc4`_^_$y+A5_AZm{@{VLMZJJR%aV{1MhX!wfKgTZ)*fhmd{HvAL%PJmoz zDzMfDx-aC-$x|g1_bJqDc?cuqV2Dt6zsqS0do_XRJ2~x%>St@2_rx=GN>}N5l%j*W z%XE5ePp-9XtG}F|>?+4B=r1Eg{Ub@J@4|RhYT>&}_z3^pd&B7w9p3CIzBiixai{ygGRMMcA*_YO7RSef9Ot@x9Uwz!w=sk1Xs!LSc^DuHSHP z8qG9AXnUpt#x(@bJox+b)kT`t?E4W35(g5jmwCjqQ`D2_E3e(!K&wDWQ#&Y}?wcM` zqfX7>sB=q`wmpzw!)Cf<>rgiM>~TtxdT45fVX529ho-!f+kcPXYJfoQZjg2$7&rT& zmShr)cvWCB&<8~}xLYC$hh#Z94h1K=7MRmuMHTk1vNLr2h?|fkWhpAI!1&ow#k!{L zs1p!BrJqz)bo6TbX=%5!p|1Rj63LXaJr`*#_NINnxMrf9x1%qtshT@4`os>E7AZ+os{U{!!}3XtZiBxUaM1^WSTr%_=H*@6miFkiT2&Lun+G4n zWmhz6bL@NJxKY8o;$U~8U7+JL%v(GtIV^t6=@tL{)@%ojyaf z381)f`5csDB0tcy0ue!FMRRZEiK-=K+$Asa9`{D$z!^nS zNm7y#qKR}+Z~PO70@eQg=`5ikQ*TGU)d5cj)W!K%mk zL$Y|eaG;Ye6hDVg)>>P{f6gAs-%(KW81NsIJld&u1F#jQfE@6fy444AmKzu_C>r8C z2O38v5MbFuiA8<}fH%l%aOp3cIjBds@=$l2<8d|Zn#T@NAw@N*f?2nxs>KEJSvn~> z&Ec@aKb& zRhMM4;e_u;Sy3UBTLmdSQKYpbZzbXJ0F#YLV%)!IM#R@^_)bgVvU~wG@QB9mPa#^6 z45nQhggK)QxWQgRj}QYl?8^+;g;<89BLzsmK2B>{bt8T*E(zq?rZF8)_q#|925R&# z-v}!?BRUUg*i$f-B#;Kdib(6pc*A7{q-3o7p}Kf!@7(^f&|r~iv$|1=xY^g(hlSh^ zmiA`*G>P4?(3wtSrlaW}^r>qVDstOF7rat@$k1Gyc6X(o`4*L%F#}$Dl+fc@+jw+! zorlhwRXBI{;fO}Biz^lR0QaJ2@ON)p{8i^gvThNkkc^aQP#q+ERUH-ve5!&r(q58? 
zP5HAwZ4t&qx>6uXq@?UYvrXb$z|*x8DFz&WSs(aHv27<22h)zC96HRad*CUb_5uve zxN+k~RXj|rc+4<}!c!ty2+;WBBn3!IN&#s6;Pa=Gr+{glcgb0Uv<%a(B0h2f90%v{Lu?o7K;Zn)?Bm86F)CMm{j`IIJWb zoDEO|qnEo^sw^F?>rwGT*$=SAG*-iwjTgqBd(mstf~Y@H7~ zsQt$)H%K#L`RSkLEkU{>uaDJz)&Buy1>n)T7yH(e^KT$er7sjBQiLx5to*IHD`hhy z3LgK|y|;guG%(ei8SyNVTAW~My?=Dfc`@Mp=>mK$w0qc|7x5rT|3!8YkY~I}2*QXQ z7K_y|WS3d?xbl`QX*oGD-YCWa=c47khbo1rYs0xjISov4@fr?8-fd!LMn-%Pbv4=# z4I-_s_*u>dP1YPFDIp-zNjZ+2NrUNz?9~IfvoPb~nv9Tj1K%x7B z#)y99_BrNO# z1hWUC3rOtc(Vk#PB13~{1m0{g8YvryJy6-<&9h2!4Yb@Sxi6yOdretIh0GU3iKar9 z5)5@$fZ~%V++tkQ`|j@&ry+2sYtefne}DgiMa5MoBV6uZSl#S!hz_xTJtB242hx|Z z)KJz8U~<65CrG+$g0&+C`9#3ch%sYV^Lqegd3n2!D^2|I!>t0~3iW{jct>=PB;t$& zO5xBg#va2+yM6mY08%Rt@1ddoH-AKzg4P5wNp9fm54wh$Z75_8^5Z^#VbD8xM(d!@ zKT4KzbTllR|B!xcnhedRrm$*wD05MgGne z^V2Q>EXW2Swm*Qp{bp~TjEK=ixYfm>3`jaEh0FHk zn?ep{D1uu6BL^w`#2`Vz7X`2K8eCEY{&(;TZ12k?qXCUa4x2Ln8@^k+d&2nPh`t<<* zlV^+jQe+yO*b-eZB^08lZ#4QXqqE-{T9cxH9kMHSxkp~%==hI#IgI99N>-NP-pV4U zW?dYoWEK}%N0K11N{t8oBpyA3xc=%~cVw)PDh5Kj-lyjX_gL)mHa<0#>m6IqN0BkO zK?o|}WqXNka_3 z6ZAGp$)-@26~nj_I&Wm)`C`O@2DmpU6f>hUi02F}%u#&L4%ZR~ToPn>g7S%8_Gh&` zKX0$*WF`{|MkjoTlL_D#LFO?`Q3(hZj#$<#EpnF}&CKsr89Hvc2@VSJC7_ zdasbtE6S1WV&{gvvpg->?@YaUtQWSBOUbx9HQ*-jMtk4h9|tB&SPn9`GrzvDd%m!0 zN(_UBz+d+R1SL@yWYCIus2FmGv3(I&rw5fhzT1ocP02ve8^)cGY!{M3M&>)hdr71k zu&X`CSB51Z^3_*e2?x9}@@Z!Uqd16G(bwh_Ty0Od#y2FZ@s)6*;9mxY%C zD$<)cDc?nhK*K@orBvPS*UU$ru{v?>IMGlu*RbW&;7+fa%5`JRMt%nR-(!X>?w351 z(nu(P!2#N_U%0#IH)vW1A>S;1XmG~&;i<32WPHYTIon8>YndJuJ%W{ezjNqlTr;gS z&+}G9Xm(zWla9`Cx)z6|GUVbiVBnDX0e+@t*doPM0TrFxZNmGeK|NfdiPEsE!HQSlYQiV~hE_kC;HE969M%KgbdFRcTvXfJ9cYXxZDzlWx{$5}kILv)Z*0IyUrq$sq7)*K5?%xh@)+JDffYhh zjxnda)eXNF#T&RG9Btz>V&(tidX^hsAEPYoGVb$RCx`h)FdQpj?j)uZKcZGQ)YnsD z_S)LsR{tc7LNxm!BJ;BM(ToPWKmG{qaNcb5R7Ui`MUH)sW|{nN0fgIBCO(?c_tCBW zy1$doz1uNO#{`^Y?mW%4JR@d8M~h?hpEGjnxU7zWL0H>-u%iz}bUW${ZD)o_&H#^UVcA<-pgMUcA-#{q?56!@Mwhsm;{OA?NqI z$3G~KRMdDb^L3j5-^T~+m3s3xO6@c#>E-=)_x~oe8uskr@A`SbRG^d@-ZvL>T0xi(=MQbprlB(#GMHP!`2#S|4oCxF{{KZKCpa#osI+6BXZfiUhBBm9F38@Q{GsP z_JqG_!ui4Tr+YOxqEqtR-ru?~aa_VZ-u4J}ZLm?AfVL}rl^DaJIA+Z%(Q(;x!h~c5 zal_Ey2+tqh1Mx~#;~@Ek34({=g@QAbza7@B8>%_Wd6Z2N*y2h0TrWY>$6&yc;BO3yh^@p0QRwq?W%qu;LoB4XX zwVrul(r@NIV@!_%cmAZhn0nmG09}@2g)@8ZPI55Kue=hOCSr0dIHZ_)HplbjaDBY# z&Q(6^9oB}J?6BR}I{G|>`egS-<{b=yrXHW&bCQH%l3xIr;#j1j3zx>=P_ly5)pchzgawifxM7w~& zAR(bFqUvB30Aw^YG=Rs#q-SKOTRjj%Y=YN?+M!p~8M&jvdsq%q&fJxf-O}uFw)8r! 
z!s80IGiORL@<9+Ihc0Gke-0K>Y)d|ws(MmFrLdGl8#tpt?TAa!JDxW;pOtP_}i`HKGGHT7}9uwA&Y4nMkO@`y*W|RY=uO{)Mv@~Sc z8Vx!CHtbk3WCd}T=H59t?zc_7RwRHuV&(3*@o!D_%T;N8^xTn`K|It*M^h`)UQD^^ z!#ydkH9pTBYn$DFxqeUorcpmD^LhMvD*spWmo-65lt4X?kvES`UbG4*+~t1vjNY*3 z75$PTLvkhU)yfGuoNP!jXv4xmVCKJRZ=cFeB5x;p0%>E?DU2X^1iol)BfDgh+K9N5mJ7;S6FRsWA*D9hv@s zXlXZ<4@;QY+3kflRUQ`{a%nQz<`zDNT*%LNE=#-I)!BcbP>TN{THkK>o;*Q-VXA&} z@{kdbDKXGhzVTEh6Wd7DK=QRLVsB9iFGup63A&!*H@SmM0zf@pK5hn5h>SoYlfnoA zgfZOAdQ4mfAmiw0t2pJ`snTO64%eN1lh5ySA^bGPerUiSO4u6$eG%yZOoMSiW+sv8 z{09GTi!AZGWVF`rpZ7LhkHP&geCed-oORikz!qjzszWGm?6mrq_nbNdaq zJIpL}m>3q>!zniPiu@LD(iO}`QWQPKbUa2f(p-7cri%jR2x|Z;{L{MmHAy4kd zx%A)@vzlYgw@r|#}@UWpSOCsV$Z*45o;6EX7};8pc+yhpYZvq*82DM zde!8np{Y5j;EV-hw?}4B!tF_;1m%ty4w+?483K)Ient4v$688uSom=t-Ho%g*AkB1 z3=g;0e!GQ_@0-Hr&Cw=OktFRMtFz^1i!p${XTQSa{tb9|hR850$>$sX{_Lv~#Vmup zzsR)TfQ82aF(lY0NGkW^C$0zlx$g6$r0ZAmxF0|0*E*mG_L~K3rk>smG4o&k z-te+({&jS6Wvt|J=gopx^ke|jszwr=Nn|TCa2ji0k%X*_(3Lm|Vq#(@F+Z=vFMiN7 zq30WWPtkkF{af054#rO|M!80PF%GGnso#~5JHxYY)A)PSr*)=_OD~hJ4t@)Fv+9!R zE0N~px?_B`oA;#Tb&ru;{z(g8xxkWzVE5hTC*D1ao_dpV?wg?3*|T-7CAxRo`yoGQ zI=Z>ZsR;d39ilnyw+LM}zL(bZy~}U?*|)mgHeC>*(dvqgiN9R^WMy!ow?6i%YRMm?Bd9_M1&$4afT3X2X9}Dy;goO_O;nmpj5VxbD&M;lv+vnyF(RiKYCdAe;qLSYVp+g ztkFwGwtJBUopDz!CQN0VqD!VeRET{)n^sW5`p#+0CiCsU4d;|}$sM&=B-K+-ZGQX! zS+6h2lYUK?nZ!Ok7^Gvp*Ltv*ckA=JNXv^vyZqRVvwY5Jz-Ige5D-o5+uv(3~&(vd)Dy94$jYfFY}#|S-0-Fg7*+7A3xu_-FuX7HIuy*jkpLN0%T%9lS+q0MR!oi z#~*Wh|Bh%4?6jNbS^ksyS1@2oB_ISXLw3N>?}Z8efC>J&%z*jz+vbPw_}~!roUGrG zw9)ccMxn==+LE8GPU92XWm0xr2?cUCfb|ydc;ls05g?;@NtUXa165(Rn@6VN_Mi6kc^TZqtY;okiBI~l2CTpBSayD z%(BT!l8_yegzSVQDTN5==&gwHj}XU>~a>g<%@*L|M* z_DxB(_`||qJ9UNb<}cC}opgivGTtVm+wO(ftL)2$54s(mwI{V%YyC}~e9Guui3zt| zcY|*&uh~jX`Q07<`dfW=AN|cud+6QrC=ENh^K9ZOr8m6x#&UG6@Y`6j+*O!e7E+rk zSgyXvJ39X{FG%aF%x#+8f7pRGocJ_7eMdF>!>YQKg~fPk8hn!f-C#4eHIYie>cnBN zo^bE|@#Du=j=Cid0;3=%Z&3Qw)lIl7YBs%kMQdwo>$EhX2n(JM2o>wZ{1T&r%bnuS zjYnk%<`1ZN?$>S(xhrImr<33CF+ zySr?AW9}D$hY9Ozm%e|?onI?;%$o_Hy`mF$RkSj8)|0<4!l1ajSL1x$4obD2dueH{ zaIrpHF*9qc>19W-ZaAKl7U?Vxs~${EF`+b``z^p5bc&49zhv9~gUZBQ!@ca0deVP5 z6qyzIV%xZqN7~&_xoI>0FyiM2P0!~?Pnx-p4|hk$=AcH4_3LL*K+uzorv@k%^i4ns z%3Hh9c_p*1ZugBFHw4w&&F0Fbtaczm0{tOqD@4Hh1)tE?zZ)n2ZmTyhnZ0UiV$w7* z!TacZO*k=(%VWMtnU?mOe$e?tw1NKqn=yJJX+i$tiNT(p^1T@vQ(LU1_%igpZRbu7 zRG+U@Wl5M4t$qFW%M}0ZP@yk()bpyhiRXqZxG3`J>^$5&G5KXWPA@rszDx5tjTYyG zYo)zz)v5iCk_M@brR=AaG!+y0i}#bP4JkU_-r*~4uwB`Zp`f5ZVrKtDF>2BYd2&9@ zp<$b@_5&{+*$#HBOx^tOFNDe{2nN4Ck%rH9Y>JEALyanLnUoT%*mI zmyxlDAf6){^T%C$$4JK6DqGzDptyJkuvbzoXqcIp{)zrw(Sn)Dk2N1l@kBy)N=r-o z0T%iKK1eYLn+YXB=KQ3+F{%Q9>bH;7-XI`AbhWQH*ML|)98=Cu*>IzSfACf$Re+z# zfy%74NuN7i)52b}4hH6|!B#AxmT|*&GrkFuf;vLVagVp=F^0QVa(HbVcsXhsIve)U z-F^2Ly{AH~thzdV?41k|@}=PtcTBX#3)X+on8lEt(aP|v9~cny+4ypdZTIfeW@ct> z#{|NO>0I~KFLZYGn2VXaxM*z`9d*=9|7Mnyn7E&6OlWtjB!{|Q`Qetbn@SIG7VVY$ zx4^!Zq^o+gKg=&I>8EZKGiS4HebU9R8~_!5LmrC zBX!ve&sjgOp{V@5Vu-Dy>=?V8W##Y^F*O&7 z`h<^gK#+~Qtx>eiF5B{5=Z>%T9@Q-otgzS3pcU2CG_8CpJ6@BtIi50{ciJqDe^yI;xDJ63VXonp(UBM|-`!y_d?< ze$#4eYp?g5{H=Zeq%SvY+WD)PhcBDCbG=!p+a1G47_ro?dDrQ$4#{QQ8A`U;$CVw) zH?FPt$W3|IXy*2a35o=kw(0ctqiK_msP|JhZwWv9NsA>jYko(cZ-HKsP@a=+)m0a% z#|5MvSvn4PxpPdQwm79tVvAQik#^erh@Z8%e_I3gGXW^5W(>>Rk`_+$ry%ab17zAR#j}XOxibcnD zlsK<4s;fEBuCh1i3uNWIbVT%&MMWL-m;rKITL@WPbFzo>wX{L>G83@KYec_RjWAH zY83UyF-zLIxUzER$@Tgj1bVfVpm>W(OW!22t20?z!?QXI6w?ZZRW)OBK67gi?extn zcARk*j2G$P|1Fz;?0cMLx>4&&^&V6AsWYYN>A7#)E8`+5HF;Iv6|3r|{?z`Hde_}D zr+4&>Vf-OzMWToI8kiV0jf{wdP=wM`ij1w)e*gB(+}!*_?Lj(+XNEIdy}C|zMOcz; zVc;Qan0}G*+^lu*a%`)cgh}C)4Bp1G>gs;x-D&#OGmDGFF!E^ioFyU&H0`StZn`2x 
zN-1P*D68hC&u8@zqHEQ1lg)QOc<_pt!Q&z=hRqdv+V!*h_9T$DioCxZ-TzWCk6CeQ z$|Y{NtBl=_#LmY^W%A5}Dps2bk>?&;+KZP9!!NB5uRV7KkV&+_iNWbGsbC&-V69tk zKKSIagZA>0n>PQ+E&jwA__gZZHMg^spDSKR+;9DECC2uZ7=DLd%{tH%kFk`bQMbQt z!*pj{xyqi2l_<%1<=r$YKG~X2uaR%rR-im;pT=H*1IK^GCWSYq&0-LAbZm-H!ox!*5S~-9J@+Hg<4-MeifNFy!7hTbb@9E=|A; z`m0w?w2A;jWXl~D{>4H4P#Z&p6QZduY_>s7Xq!;M0bAGCNgh_;cPzPc0)+L}jg@Kl z;2O{#LM@;3{+E_zOd1sQ{H-Xyak=xy^07{2{x8IC4UH$M=6g4`K;f`^U8OXj&3Z^8 zcodh%=+7r9jXu8x&y-YdU^8z{b#ta~lqOn8y9W_=XecF3|mV*0ba-j_jy&=_x1FMK>;8N&>rqF^SdVn+8izKPUefcA%f8thXL@3h zPP+8aJiC9jX}|mK!XmBf%n9RJ8b&ddc+TTqcYO=irZwCJdY=k1>y8Q?3^Y#KEti+& zy{N~)q_|nejGiM?FksrV$&}PiXY`IGk8N+$QlP33VpnNaa&`C6%*n;j8rhyg_5;mgw1P>pClchv6Hf`m%JQb3;$xKMjXrfW zMM)?{PAD#DY`S%Ro;Eu>hao~Kx@)@f@!n5P(ge2vJv4ywsLm(BStFYE^KL(%Odq&Y zlAcm-c7=Of_6!I?#JxHDHOGXefLvu zT&J6`e;FHdJL98uketOsE#)A&Jt7BxGEfKK>G*UFpwc&Zp7n&xB<_?K6R&O!vCJZw8kxZ%BGg@;V-gIb*n6|japX6 z@L4#zu?Fz)B)3`mSa4@JQI=o5dQ*v)-h(?vCpU(lnpZ(NNbu$z8>%n`#wwGHAsrd+ zF%8CbxbOTscfrO9#R-Ldm7=~4P$EEtd!4qHT{}`lnJE9=xXYb=TaWuoGkH&+&}I%c zcOtL*J31C9(m}77mn&0r%UYb=@WAzh#LMm26RUpZtf7^J)Wgpa7Hq_ApuB+FmHd?} z2ayy)Xo9>D2MhQw-@I-F$JQCEaK$>uzB>2s6>o8-KeWg*1 z|9^{?e)`@i9?2)}tc>9WN=UG3&hH4Z0N9ezXD{3Hc<9yb?2C34M(FDy24*79IDMPD z8Dx$0o|#^2S49&s+O3gF<$I2;%M4JLPLE#h`0r|W%~Lg5?2&vDh|>&Lk}H{`_sHn? z-}b2B<>I#-4*)M7O`*g7M&gYg4>*}@Y-~i&EsQclF#qF~IBLTkXAQ4dc9Cr%0Edhg zs=G}CjBf2MH&Yg9@iD}Wy5E(5Dezj5X7<@zh_UsG?TeZ;7h@wX%<;p+3lzJg!d~wE z7nh%K*^p}+cJeu#93FjOE_ME(v!nYv3oAAXTm#gr!FmP!Qd1{xC$>8>I!9HG#Ya@X z``_!e=EW4;b>F{>fBmODUF6+9r{0c?%&4l`$f$bX+^k3-)I)~mmLM9%{Nc?_oOFUGZa8(RD zex7P6!vkHr!5e3@OQvgYY}*~>AA8*2k=VCiJ32c3o(|vyjCVF^GPecZxOs>@k^t55 zKmYg0BMHAx&8FU7W)v-SXtH;RJ2UPU&!OK0h(}(2E4KQ!B0cZ1lUB~o3_A~)2$mkQ zys#tm#{Mqj7(O`qeSj}%V7>XeiCO>0k9(X$uoQbPSUioZ3;pE zU_6TgFbA-)|4{*j47(zvS;|RRhbCE#4N7+a}@6$cIhQWl23Y^ zI72bh%hKAKj4+$6yfT)JDLrX$svpzaqpryeH7t)ZBPvqM(t#(D-DM*GB^R|Ff_Z-usn0Ir=J!Dc`@cZZe(A0CaRqXSWqjnkPXAKi4>Wax1ZRH)nmU8G>n(b zk}iL{N-m2d0eCnJE{ob)fUCnv0qlnSFOoQ2HJP!YJW?@9MGk;OXV>xAwh!3ImyRyB zzfR&)PQS>Gr2!_8glRGl<-Me&28@jEyQJ-4)Ydro`IzUQU!Jh|ay{v8VX+wtnx((8 zq{MIR&%&6JG^Y}Yee?-^-8R_+3G#x>4N&M`YTvBsmVkGi(LZ;IwX)uve$c_KvpI6Y z7hPmb*}OC~Xl2NXo`->vQU0?zjpykPZe0L%Qj9%P;(TdnY?9i{CZ<5x??1B5xQ<(tmz; zV5H*y+vUYWM5dr!e${|eQtPtB^P!5qABJTj1!?ZZT|=L1R0LwzXr0j)S@L*3`&vbW zRi4DIn-aR9L{GN<`%ettx6bp&)Oz07k>1!>NYLD=a47uc0ozlHd!onP6<;|h*l99X zWL?sJ?Jmk8Ot)|FRinJwR7V7g8(Dvue22Mo&5qpa}Q=*uS;D!&%#|MBA?UKXCfPA!GU8s*zfo_jE4XxQl=`PsfM zyL|FrMzXdU0{lLEBz^s(0q3s-Jh;g2Ra2YINw4U~+{!vO$7WRIb~bY5vC-welDqb; z+V>lc$SDb&Hk()Ar)+~5EwXH66ftcdqbN;q4yUMxUOQT2`}t+fmn`+1z;3&d$6M3C zWe(&kZ{^hp*(;I4wWDNgq_Y!R(P8AH;A`V1^LSXumx_SxO7awZv9|0HR ztS4ieb(#GwpNjjps;Z6@78dqiA62>7?LC#O!JMVHG1PusJxhz@*^`u0@iqkQ?8@^c zinlW)Z2!tt8RS0DOW=jS_g*}XH1vfX$x$deHdD}Ad{^K2aCqeYJqM?{YJXMz#ONzv zGKyckbo0Vb-o_7<8{WEoyMO-H`?;aJx?fYuqzvPU>0N}560vr4bky5^q<7{T8z*Pe z*ROpeh z(Zz)k7-scV!47epIv%O5MaGq=gD^e0)}mE5-UeJbxKlWFWbByZe*t$#U@Mx;aB@Oq zES;Ku|L)zBPo9F7^2PmD$mNZ{94mYOTgUCTo0cCNEd|aMzf3`3MaNmB4t9v+X*gl6 zBW~T5lhQ)AQ{$DZF zwjce4;{HjDB+%Q}->JGurFAkTX+2VD?=7XtsOzbWN@d#pPncr!-rku*I#Fh1=~~_K z_4Vu5p!!Z2kNt?BwY&1oE6yo5rQKQM{&Wg`aQjcpC;xsw5@XM+x>4 z=&&tEj!DSCaNu=SmE!gOnCKt3vbi7XCJ{W8NbCtut=37y@^6R!fsZ9&^%4mk z7S7I^qbJ5*y?C~B-;-?{B5vWRmyj}}SX|($ixZQ~66&=rzIt$Y;C$^91Cb{)O@F&+ zRH#ftES`*;(&X+Q!_1)nQY)yP1L|BF0x;bBt0wu@&EY&50t-1EI@&xWK%x zzh54Nh)$_71te`U++88Tx67=wi^fOYq~mED{SYT~XY%{9D~v?>l=G_+nz987xSo`v z`X@RbOCFrZk01YU8xk4T0h6bGKr_&rbC}(jTJglWDtbG4PS-M1kQ&(V9h+Sl8k9J1 zIwpIK77Mk=JEmOnwzF0av8|l15=ORh%(jana5HLVsVncBPy|7xlV_?=4)0==x8`nc ztRR}A_K{e-x91-{W8ejUhd5&xnMdlFjw^3cc67&-n4}~{)Z~+E+$w`_?b85E6S~3J 
zHZt&Ru?DiJPQ^TQz4Yo>iaOvC9M88RBQGD>>{W9~TWG?;Jub<`Kkf|Qoi=MX>ps=C ztvKW)_~)~=_P=`dDmSjp{`dt==6=z-5qu@=xsm;e#(mH=I7b4}3ZacBp#wut^A~&c z9$hXHvOtyb=#k@SS<&#G63bFH@x)iDmqy|{rVynzSC5|8mUB6g);co62`#2QUEp0l zDjFJ^O9$>CasY!uSl18?vKB|@&h+ZOZ}|7Asj2+QM|{eJi*wJ6jyK^(K%_qlmn|%0 zM@@|`F(~fRf8xDUDxqUf^qCKkyuggETC#Ws7T?P-)rthnMi_q+;C@BF&>IGnkMnz$ zGQN#@z;gT2P)q0Z%9)%VK#>J`d|71;$B~C3xjt0iRz_PsSoq}?77cP=wW7=fCXD-* zlB%@T7R|EG6AZ_vAgdVC+V?FnaTi%&eSUw7Gv|ooTGWXZq-unVbE2y4aAS#GQVZJG zg?hq7Z`-xP_i$S^;`q!pb}S7L$0Ve9?aZVV| z265Ac7JTX5Z~4TdJ_7}GO-+r&ahiA=3`hdNpyAq|VbY6@1N05>aMC>^8so5hbz_F$aeIT$+A?9I>{gtV%jOPp) z!2Dw(vwCK>58qRI0Gsc?j;Z{%(s9r8mtR%AuUN3Y?jsPpxz!n8cIlC|Z)#ZUGKqTA_s@SPiSv_X3OHz{IZpB#M%3iAD z$(@d$7x#xZU{iyoZeRNzF)D-2*CzCHLoki8TZ>!I!)CEp@z0q1#r@C5JPe8G)e4u2 zQ?YPzaym=Z87??LiLw_(p@eTdpE7tEIzqu1AnTad;VqdwD$(}7ePvXc!&{^OX`J&O z)`-jd$u2z@ztzsh9H!u=;D^?bhfHmj&#vK=z*U~q4Yvo(J_L62FBDoHin*swusVM3 z)8?14Di>D&*A%ccH{WzL{8g&i5uy0>Oh0nQ|hg|M@m6m`Sa9lsg$=Rk?P?4>y8^ecyQ$M{j+S9Eclbf8J zT=}>IFl)v3!F%V6VGzhM@8+$B>~;$>my)MXJ;qY!l+wD4_LzYVUmc+&L;2W9R6(=Q z^EC1!1h76CTk3#KX{^h+5^)Ofkg7l_6r~rZ z4R>9MZTpzrbP83fP+Y#D*hq1^Q~PRir>#NJJ6GR+7@6CV;z#yQasJD^PNR}7oi#SL zdC>K2K<{07MyH6XgRa=SGB!D)xp_n`h7UOv@-brC3AzBu=PmTu52HcSCnE+#;`)gl zs2*JDb?)moM@P0!8qc1(e})e=L%)n!*8T@&vl1synyp+2%cPX02Vnf}S7GNPBg7Yi z3kDk6B|TRN#{(P3bG=SNdF=fAhbvj2EFeIKImsH44q(-I9)RE3K`sJBVqo0T4v&i>B4XkC@Xx{quu$xELpQ}Lyz+t`ej&V$0Z!uX(@<3Jl&Oc8l3{n7^%zi?kE$v zuRa|w_sdU?zoOzjzs;Jm;Z0;H{{3^KZ?|Y(y}FXL7(X%6L)s4W%(}6&Dc zs}pz^!Xm&;A}2qtAJ+c&JX%`i`GM&B|kxBScTs9IBqH{)%jd&q6C#n~`;(U~F+{#B&5p#F{ ze7^nXjGkL6s02&D7kzhwZT;i@TJ1hxj7P5b-8lF+Sm?apK<)tam&>bj{V1=LSZMAn zj}BTMT0i-x_3tSe?u0Q+>CtMAz{z$#DElH`tTLK41Urr;L!Beh$sj(sb#gGeP0d+J zsw~{)TNe0j_N{RR6Xaa{t#QE7&HeaQVF?3D-Q*!J32Z%Cc^gh zirue?p%o72jcMb9*bPPN9rYaa6qWD2otk{hmjOP zFARD1w-C8p0DtFwHf$gb+f5jY`7Fn#eK`4d`Y&x#HpZeY_51$r7vJ>j)0IC0tGEBI z-!8*@y7K$o6-^{8NV{uaIdk@njz*e6z_5%3`s3%%rMG7fY6kG+X52rKcFqT)1jMBP z(1*g0y3OBvj&=<(3K`iiVO0h5?%W!ZEs??&?m0y!drE{=IPYRzv_j)Vy+e){&3rGP znMuB@l-aPCdX~%fzKM$o@$ryd0g&X4e)%MX2|1#JWQ2hTMRI5M(123%mxYIsEMuVy zf1Yo%uu8so&EWc2|Ig%#mT(2$%@_xsqMkD}aNg-{b#*leQ@y4cH7=$>-nT1M?BdHV z)fiuu1$qvJQy}yJTy@~Z+r-;Ae}1!P_I!l2NQdRci&Vg~(z*(c?Q552iD)J~0nmRZ6x>FrpYpdlIA`%T zyw6(SCA`#ayq~A!{WN6B6UW9j=t@gNIRopRy2MJm0-1*J;RTC`psrRnqq9r?DSp`( zuaDE1rL%=ZkKwYO|ClXUf16OtR|k1uZ%t4KEKWCVB4z>+25o3I%F+|w2)|!1{)&>3 zlngN!J9A@7*en`^F=8lH8w^#%6IG?Xzl*Gb4A&mOV{T(Z{&&{wxA5$T`apuEZH=Z4 z@{^?{XTrm-x9(5>-=w7b_wQboDFUd#2{qGXk;e#eY}pw7|N%YczA&$%#IO zuT7+*0n7*XNF@p$j3(7WO1(@79?)3dKv(_q;$mo@eHIKUJ`!t|VD5iJJ&^yrZEn^w z;eN*yN}hisXYsO8U;!59-EkWihUA!@viWsSAYWS7-h_@gQj~6cTECXs?Gh=pV;~91 z_yt7+EX*gNq{sjh=h>GrK?{A%V~-5is5LW5HERI z{{Argd-e};9PEU6ZSUT_Hx+oxv?j*>OSB2y47R9%fWX&nzF)WD|GCI;V@8`MJ4c61 z*J9frS)w<-1MwJ#B2ATmNP+q+^QK*Xd@6VN7?d73kCtWjY2^D*2Bf=LymRIx8Hu8U z7h>fW!So$nOrbc6uIW8rWfYVH^a{w46wj^N@3L$N%#kh>;l7Rg^f%ou`aqDHd6RO? 
z9{CWyaQpx&`{XC3=*6W@qspY_ca>l-VOQ(SaL5$ojcxOr_5`*M@F8ka=U737KH2!n zj;+I zn3WYAan9D!atAkOX&doGg`+8YxAUI6BbSV!S>$THXd!7V$ue#BD++z580x5_DN)b& z+Q_ZG%{sfuZ(pj%795T`X+1_(DZ>jbR=1B+pxRu%fo>r9TzBmq9kY>{pF6h$EP&=R z5g7I%CJ_9_`^UXmH+{YGr?Up5Q%vhX~!KlA%Jp z0-TIuyCR(i7Q|R|yYnSqw3Xd55P_EEmcxP2Gf$o7RiB7$PEr*xc*SZ^#L9L{X`m3J z;-5&|HWaY~96Pb})W}fLa4N%&ojqPRII_)}H_StVo|+uJbhMcGKd#8!vSs4g`Ym6B znQU_zpZ_$?jl)UFNf27>h;7q3B)jw2rEWXsaa@`XHOL%^suZOjw6u*ctV#Yo&Q6cM zA1gH~nzeuLMfV>xAD;9ut#b;nWBs(JK_M&KH;Q|m!TDGIL9J@XKh-#hM4#bF=#u|H zen!ip`Mw34-D4v=y$&N|k$4F%-G;!OJ@^d|tgNsXw{+Nl$Scs^d7Sx}#t&K>@m&fH z7Hr>+!sa;>&bkeH%s=UZ<=_$l3j1lvgwR2UEvNh~3}9LpzJ+HiC^2yCxN-Vj!HIik z_)xcp@{cjT&7-{~z$BQoMXftsFZmUU2>y`id{=iyETI($SUSMM@N0nI^#eO(>3j1fy#hCTnP$lHlEa+9>a>a)ljOb= zxQY$4zwAp2TY#yFv8y2n`8`*^U{BrQvs){VkK)Hq_{8W&mqRL!h5{F#_C;ya1Nf6( zXyU$fj4>>TfwmAs!H9`QAf*>b6P7w~ZRLqmZ?vpvm~zoC`kKw>*oqwSx6eIVFXHYq zHM6ulFWcHu#A<7U$)9e=4v@mDM)t;Oq&JeuisI1*qiq)Em4is+zg8XeDAC=#Owe-^ z$Yo#b{@uM>bFKQtO+MwPo+|=`+Q#t9moM3=zJE5yE=yzaAGJdZhv zodb3u(bU_P;BC&9b}y)70i%HS@0Wk}E?gCKH9I5^OrsH=N({ZZ+~(UQAhb<}ljONo-Vx^bM2 z`zU5L>=YdUwlKaNCoKKrWob;QbxJpDnMp~V0x#WSFLUj3T=Cr?D;PCR!ermS%~JNp z4f(!9l|yT2A2&3}7$mho4siB;w1-R-givfTw2z-uyBa5L_f zp#Ei5W` z9=)7!&Sh#siCM9?zwbe9#%AnkAhP_vhfkioU0QmGBq;&PY(bm*B`)pyai{c(B9^@m z9vB!H2yXmV6(HE@dpX0>vnsY16Ol8k{)gu#T8+WCmM_?F7nz#kzY-gt+`uvL8Wo$i&#g_7sbODB8_ zkh{nixKe<`u}JDUTmag=z{T}uX7%2^Bmy2QEj4}Ybol7e@K5paYC#zq`37Q%uRf4( zK9N@ZQOa@j8M$)&xSO|%Lg$agGkav7RAsUF$Hm1s8^rK|*Pj5;3(nf5fP^WjoYpaDT)jXTt4R{+lo%ZkjR2U}?(B5*E?ZX}1cxt;mUNKmIX zyE8Rb7%gN|C{4MedGz=`YxkLl))UpPbF8zo9D1?0q@K8(g#n~8+Pa~Aj;(Ni30sjm zeti3OqPJJvK?8uGa;Ftqd*dLQjOm>bJHG^A~1YmCd;o6aUI|Gna;@I9E z1<>tqfX1*iaQg|VDR%?Ct$t`!YWe~KYiVt*FB7??^gzf?(%PzwHc~0@SqKFP#yKCS zA3S&<#-_$E`6Sv(C58;_#GK(s^d`|VT%GRd@OgDmB>hqCW}~R;Wc|lcFZDTDsz*1A z<}`vQ!9%&_P_}6R&&`58`V~hqVgzBHclXv6oVBDzzw@2Z1Fw!X6n=6Bp~aO}ds!!Z z!Q-&{LC^wbt`SRbY&QqT)=n|J5f1Lqj^?+pijcq56~h<(f%6{&4gQn-REet3tTP90 zu5Y3)sE(kCxY=Tf5lT;w);rji==x}XAPtjEm-IzY#Or}-2;RP zyNZ$QeV^rm`9-zzblMJ4#jS;9U%V+dKB&oY-K~8QJIg~xkC&5$BbLvzM~x|NtP7~U zKrD-8#q~-@?bpGAyv)>b()x+2teFM_M}}|ZYML9e&RlyoVM4XzZMOeR-u|!GYa5>A z-(VRw#*@$BApu#coFIJYa79 zQSxEg3F_cOJrN+}PRuSziqp{qY{DfGv^~7vxAd~g4P|4u^ZK|#Bfscjk>it>^oz|M z9Ut|UW?m$V{|y>;e){{*#~M~-1X^1UO|G8M*%Fi~2w{e*n%a2UpP95fDp|0%k!vXV zF7|K-0P4-BLo)?GmZ2*?DhtFXC5P^4O)Wj02E*b3S!QwoGa{lQA6g>k2(FEhs4&?0 z-;0bte_04dvx2F;!&g5som}=5FL!HavOF8tE}TD)q-})~o0`_8y}Z0FN{#emoAV?J zT%nND5f4KiX!7hD!-d!P^UfTNQsRYbxw~()C?vB;Kl4s@Zi9-u^sP(!kM4Z@uzlzr zpE4T<7uXyMrZ=|IQUn_LBnbU5wb{uVtKK+m-0=8)aw6)s!z`qtl(8IY;T^-F&1^lq#&+p$QLk_{=I=w$6u5xER{)=$&p#zsPS*AJ9mXl`+&+YaXUR@c>4K+ya) zHzLLuoGYF$zJ1H;hx~M;x;!!>*^&JX;d-w0XC^BxeH(Uq}0}v$dgE2w9S|1?Z&OWo>oe-&o zdhwdVv(B_?>nB@V`UnN$>*H-`_E0dBzx2~9&}}?({cOY;zKPk7y3e=rbd)_a;-Cw4 zn){?is(kF%;4QjZV`P4{2973v)?vb}! 
zQq*^(oG+G4*1oR_5!umLijEQk6N4AMO=NEqRr?-VA}5tPw$*=nJg*>dbX-tJFjKIC zEL&W2=lupTtuQjBR;5G01SV zAXBp%Ts%!;^yi`c;8|<^>K~1YS*9z{#*j>Z3p5uv?t8|4I>JVJ^o0;^Gmq ztsOuF>}7*oywwF_XCEzp)fneG#J>HF%hAu*M(kg|n>`lO$@(aE%dZo=pZ92^=fA^- zybuNUEEFt2AQfQs2;!2emofv8Lz(A*f)Q1SK9meMNbtZrA8IRO$)h`gmBV$pQ-IXl zUvFD3TRbU7B9Un_1CrD_lz5;hWC%ZNSHWILMjhw>ik0S;l9fe(?Jf<$CKY=L5m33C zwG5i|euyxa1Ghv%2=Kh&;ci;FcpGRF*l3kLfAg{<1uM9O*rr>seU=t|Wy|!9Vynxb zNSUH=8?eIaqq0ezwn1{TO_M~D)@Ucrn$*%*bq?cak zvIF?g@P8oe(_5Xf@$l+jYVDHXHY(_AuqLIyo(vgY2$3yr>`a#0qs)j+i=#0tk~~lr zgjMia9}US{kPq0GgXv!=8EmER`jE`gHSV3IB2D720YT4Rl=SizW(IE4 zHy2}Jr*k>VMJl8q7d<75dQbY1xs_MVv$w_~4&nu+8z|CC?Z}o4Xw^D9#S+!-C|q>y zW8b7$Z%t}2C-KIrSlD>(YSWd~K7aA%*AG>|^0<9}IhPbf?%htY6%VtyPNg^}de8Iwydj{U zc&(O$Q)Xo)|HOS7nf-I)OCAIwh!rif{Bm#p2LNEiANI)EpnTFDG}f`DiHck(PROt` zOOP6vz+&ys1Fut$7pP~4Yv*>`f!bv8X3m0?2u2y8YgEPXI0jq8_QD0>m~UVM;DsU< zfD}&lkF8FbqON~W!SyKHc8Ytd0t54ISZxFyl=%Rk)&UWbQ}jVGbWPaU1Ucl&ua+yP zz*;Wu-y5qg8>3KCpkAuiA)C{m$JJM4%Vcc`9>FPaE^(O0zC4PK1@?%ly88djKadlN zbmV{6b-17+bur#1y3JZlTs*))M6b+VrH2Ii9x5^dI0x=yyMz6+G}pI+?;i{n0!~Ot z<+w?$dDVQLn-e~wj}8XPVxmljvC_eT-q|A;D~$TW4zow1%LB9ukPJ5Q%PE(8iHWjk z^tH9yim(Ru&ox8LiQ$ak%tm+E@|@)*v8%PIJVVc(uvW4s=h^$*zPj7)?!dqS#$L=3 zc%-k;z(P9kqU(6jOejtwnjrCo_wX2Dm>E(2Cb`H z8t-r0FX9R`&xW1mjgE1?XTb(17PK01uv6je32C<6ejQ_%97#OeI! z7t7BJ;8E*_xVg2E>legM}bZutxE%06Y9_bh%W zvcLQp^<^Q)k4Z_G0pqMNuS;ee@@qV6G(q<|x0_HZ=&1h^6A;_Tx_MOVk(y$gRG zMTcQtjWaH?9nQcUF=dC1gClUzL4c_pB?)D}{j@VCfasguV8M7Ic^GuaE45!1iBlPe zk#z$k2-KTU8E$EH=9h#Tl^_MIOm|sbyQVW5CR8}<@}`)YyxCAY>LkD&v^&gLb~dAz zOvpVOcbsf`^X3k_55yuMkhaY}DU0A$W<>G(bv$(Uqwk(#aRWHSz>9AJbArb7!G3Da z)_Oz?IHi`BG62s(JM2HI){bihp1f#?vF+LgVh5jzmDG`)q)7PH5xrB8LkT7BO~OI^ zDe&88Opv{S!01o>#ro~POD89~{>*nljey=2$n?T)!~jU5|LP=ttM2!3U2|W7KC+y? z(Ss6eR;ygfe2j$C){;A3DO5j@Nbq-x+8Bn^o4jry<+sW zPXhCvFX<@#opu1WMJyx~XP763UKU7P#4sz+&QQXm*WvmZ@X?DGo51OG{xWi;$@0*v z*RSJserpO;fT<7Y47{xmcw=%GC26aEUaa=>6?Bwrn+O%|Z6JV@>geB|idlG)6eL?Qz z==Z=r(F*7n(Mu$h(}Zup3;Qpp4&*#~C%k5H?HaT1pJ`t@@J9v*4fohnW}%Qn@dImC zusEP@hd6V*_|I%HJ#}y|Cyh;O#Iz?+KSI6-5~p_ZR@P}hLCJR*)B^#l2rwLCv<0kA ziE-ffk6fjIff8EaSn61;jp)H$qTR9M0Vcdbz$FJB6n#_xPdXHhu);}Ho#^`es|(S` z8RMD=^BN2ri3uV&wLn*h=R(YUL!<#oCC~D-|D-hM(QA~juq&?l&W^`S7$)RbiGsE( zJcQmLD0wIUEZv5l5@V3)$_G^&ez{Sp(7{fH5aK{x1J7wfJcmiNCr;2o^#|r!#xG*V zD8TO&bt`~chYtg`v%ITv9sOof!mt)WLRpDNjJb+2YwDihS-Ed zDqkwJO`{gx^-b`^@QqJ2M-a9^1d|Ioiwx)wA!)mt{ttDOnHP+D2Ui+Gn2UFXwrlLZ zbNMV8ciVKJE?#hJv@PGmMhhQ8=Kf-{(?Ys}ipDaDLRn|v&I&VT0^>FJ^dv@j5e>KA zK5=A%lF5C_2hrxl<$>60&m@Ynf(1oipX~g?0)}m9*TFpMsIqXF4dl>TEi8%*cqs7PXO266>m)7g1eKbX{GW&_X~yf*;E z3cL)2{^t&#vYJ!BeFd0;4QsV)#{t#gJkGc9Y~ElGe}VoaY>l@eGgyMDQVx0n7_b1c zCu#;H_jrafU4$JqPCt9?b2d4_CEf}z5ugaj5!7YZP<1MKEw}z#N_!m&;(9Sc$9Z*D z7Z48MgM5}@zlJaVysE^u#O{&c(;K9U=R-e=;F;oAAlE}YasM(>kGO2_9iE$gksaM? 
zywFxS7ZIsTnFt!2`L#QP1&V^#UL%2D%v9_F@$Ru8p@42ClV2U$gF3qQd^A(2Q>)LWqAJvlk~^u|xgw=>t8$<%V0tKb!g zM?oYys8UQ0=(imft~_M3PZ%6NkOLsOvCavzF(i~sFad9bNh#*35l(6#v*b(KiYFp6 z0K7q+5`;}VHD!b5R{Z^fjw#5ff<-zA((9-rF+sP_N@$uROhHUW<`(K4oY7HHQTa!B zKto3TaQf_78p2Eg)Jm*wtUL+#W&p#OXNqK%c*Wo=AhQlz=an(Q967AG17@{_QO*z; z+0!X{o)3n%vbP_BzrZRD2HeF15e~Hc z>b=c-#4#RelOGUjY#XfL0w&+3rMm~Qi^0djvLpCmiK^KT|2&To>4M_~6AaZAM-|7V ziEzu(+BxNdJqwB^ks4mQB;2;y5VGlU==)TL=5c3y_9yXHZ zT~eqKaPfp<6m?gwT+PriM!Hbs=#B$IhukyqITzWnr-=lLs4G2WQe&LUe$^aiqcu2x zUT+MX$E^>#&$Oui^JQqf^`CtK1_=`L9T0_ZO_-7Z$LYp#r`+nzoUJH+34#tpmLP=_ z4GmDN?euI&?8GSm$ER@+2f$R*)z66wt6D!BZ)5V*X)_+A0;X@F+IEmM4&LjHQK)DQ zj6C;b!d;XaIE-4+ArjB!Ngf?cN5BdX1UUyH= zRzJK#3{n2`S-40B$3PJ_*<_$d# z6v#??Sw1gM3LTLJ!4WJNMQnTAQ&{9h5<6O#oA4`Jf@yY^`2O`FuDG`Ld<|W53zXc; z%i=&jdSwKP^o%7gCVn-Cp*|K75##V=kT-*qNzef+`~sFoAdrChG(iMwpgH(racvUg z90|EAy4Pp#JHa;q=2@_9A%^Y}_t1OZK0SomhMgc< z`LBI*JcSD-tKYv{Iy%yUW`}*&iiT!iDu>ERR{qlGsGUHUVrC1ezmRq#1dn+V)9d&p zm3kgO8@4DP?EOEDU1?C0R}_sH>cA)k*{m9?C^kUQf{3wTiwIVbST-TV3`#^S0hPri z0Te~87@SxlAjm2es7A2?!&abEY={AAS_taUmL&)y2xU=(K}LHXwA1!iJHre=@{v69 zzIVTK?>*<-nOwLVOJZt8xiUryxOEV?kcl-*Xfy@vtE5Dq@f+0E-mbphASJtd93rjEr=4M0nTFAT_G*VEGg{?KdD+Us%ZeNgvc2gK8?uJ6L(&2Hf#^Z`SnrZCkMSg#{(=fwO9 zY(+{V%MGN;JwZV}U8&j`ITl9Eu7e_VjB^*34%Z` z7sndUev4FZ-Q>A33;P49vo{Qv;iDKUVVrly1 zCiEGam+Y+k+7P$m7M10@fMOTB9@PJUmpHz$JjN|WnMe^0p zbXW*cz#@@aJc4H|>Q2NI704TZxe!o@3|J$?L^L=<)kwj?s$+Ax{wFA+`(n|yiL{30 zXmcW0L`})mGB$mTj}1ow!E$7Wu*(2jbFG+s)7Tm$I({X)NZS6ChT)E&W08W2qS`(z z;JGdoA|Jxu3@gQDz(i1crR9_`nya9PL7D2VnZUaJV7cX`?;c$=y_&&L!1d_8r>v`F5(}2&NudeK5A-;33R}dW!BiUJ8jvj zd1G;Ph=1#(A0ff#L)A{Wnj*_!1BXkOlQV)a!!iRusp01>aq`Z& z5D;n8B*2piE+_?#-3;@EO@~L2>ruC!ZX4iMRzm3Ht$ej(jgGkYmcF65*YKhnVkpuG zP3YM83&_f_GUV9&@`mxr(ka>)XkyvfXQ4JpOiXltOPduh_N@BUkP1Prd|pQD6V|{J zF4s}c{IQM;Q?43TkuP7UalOL4^U-D-V~6H4L=DS{7mqnKKk-dwIYnceXc@3Ks{oxD zy2v_wd*Cp4L)N81R9QTZC9TW%nuyzi8V)iM2rpnC^!c^5Qo%A8nVyH_%a$-yURR0= z<3`eQi?#I|vSl(fG;l{eC|fA*)d-F%R-j+gad&(8>FQ2laa%YLJqqf{h3xi5rg+=O z`GJ>%S{g-)RvaGO0;q6#M&9Oj(?jebkK#X$Z1`Pvu*j{n^`5V=v#AQ>$k`FK^ojxK zrTd)93owpRSiq+Yg4-i5pc7X3FSpVX&>7 zC9;~cubIr@rtG~wv9>y%^;01|!@3I=hoWDRRAxvFDilu6Q zSd7ZT85hh?+geW5>8W>x84G#!+(d!O7Qg>A%9*`*j^X<+`y66OGJ8F_4qg5KG~fB> z8~raAA7>_;g4sxazwuv7qUwZvHuiRY^6KDGDRXcsg+j52^JgF3cI42Z1BZ`ND1wgI pb)GDS2g`@9W~jWQCSNY1P&Vc2YA>98HHE7w9`3u`N?rFQ{Q>(ZVxIs2 literal 0 HcmV?d00001 diff --git a/images/GlusterFS_Translator_Stack.png b/images/GlusterFS_Translator_Stack.png new file mode 100644 index 0000000000000000000000000000000000000000..2926b40702ebd159e5a5df45f1cd73c36c8506af GIT binary patch literal 169527 zcmdpec|4YFyY8b=G$>;fjTD(O6&WHk5i%B`h(e~!Lq(G$88gcqQijZwq9VyGV?vq9 zJln@Zz3=y}^{u`3T6_KWAN&5J_r){Z_jR4uc^>C+9M^SE`Ly&FGI}xsfv`nZ=9CJ7 zu<12{KxRw22EUQW*;`#=5S7Ja%tulkNeFO z*0uNLS=T((Ra_r5%qMV4R$g^S2Gu9_w0oC1E}5TKR8+LSHM+rowlbAh!e60KfKB5Z zh21Gh@v>yGl$&JZ^Ig}frWZEs+7PyWbaG)}@%Z@h$Zj%OiNfPS6f08@%AHU7tGl}x|E8S_^ z7U!oaDJfM|RMOMa$;ipg%*+`0ul5cO4)*l)ba%&Akh;uG%=QH7y|W)_FSIC{HL9P( zo5>P?f5HX&AWpsZ^h*`nL>I1&|EM&cw6wL|&2{0m(@0@ss*Zr+j}3&^( zX%BE@6mdSr$>~G9{o4|M4e*1DpTmyR*Q+uyMN+e=+dDV}3@wzirLEhr;aPoujId9!VY7n9t;f)MOyO`2&Qa2sw}^Ug23q!E@bEWtCUuy_S2`UU1i>P zwlQ&JJF=cRL?t33f*+PT?=aB|m@k~zbnMu%L(2)~ZFyBZR0Kj6-Cr}$m4nxvesWaO z^V%gM*qIK<+h}1nwOLFp>Zsg zN|n9PX{b4?B8XGeesm8HPi=d9`^S$-);~WpGBQ>jAM|Drz&!W`2CA#5JXK?D$uz0O z;0xOJ_Z~EsNUr|4mXI0cPF4{0`0-<-$`EQw${6+Jtmy*V0d@P8Ykq1dh+T*O;s=VZ zi<*m`ckU#sv2t(SyxGRqmhau!`(9q6ixa9Z4d%YRSu1gFby3naKin-eHa;HrBUaY$ z>eZ_cBBiJUH_9t1>Af^4jSzNHQB|!dYJPS$`B&F_jOO|I7L(STUsz+MGYhlh;exh1 zC^ZZ0e|eJA@(Q+m{i^un=%wM-+^t);_B?FBy{jgiHrX{XImw=Ft*rbdkXgpTHQ#WkdwoOS!hN261 
zLZ9e9hd{0png2wwA3=aCX7<;6rnn}=+V^+&m=&a?O|u+7K0X-fwls_Hj;Q}RHPkZx zv#KMpdC{r&OEhjveXsScXT$-vIYmNk|1z<@ z|IwpITNwB+?;2@(+4cIyTBnKL&hG95=8ho&0RjI0Hi(<1rlx$m5jo*)Od?@HLDIh$ zlNfGduIZ(rN`_g>E5%TLK7EnALx&ELZr;4GG(W=kFf=q&=-9FDA3sE}j06)^lAK*! ziWjF^I^W*WI5t+BptQKSxNX<5s6t|1!54F$%TZ3#EoAMs*uy92KyV)>j$7by>mQfZ zlw6#h)l)P(dwR~k8fQx@*g;2^sGjV@C``;C`=uYA_*p+a)WYJD{VF@#?Q^xR*sE8s zut2BA#>S?mu3x{-$aTRziDxwqsO9D4$}1}aLfZ0ddGwD8(hFTrDRAr`9u_fc&Lmm8 zAtyhdjf(6fg?VevOQ-4K`eY5`OP4m?R8~{tHTp!Sq*Qf%sM&;n=lr z0c;xYZnb1vjC{>7{#wyzm(=}JiIJeZAGuCW3zDu^*Ww`6sC8-)?Dh$gW zm^3~QR})^XnS37#ou<1LrKP1+FFo}N3k$Z{>a|gqsv?lR z;?pPp5I+_r*ZHArHle$Iezge8fyT77#=4aSM=;GOC^+Ge5z?W(iJZLR^Jf}EO#MOm zz{?{Y$RF#;X=Ex<=!O;t8d4dyZ{PcT?YZX&jLD%E4bzC@*74tCz2l@467uTN;m4sVyy)EDE%Ut4r0x9e<)ylWo7SSVDZF=_=rh`gDZC^Mtzszy4+PWGsS>=9IwpVSW8hG^IBOpZQdLv z;^Js&`En2G`-i(#lk2tR9=yH1%v4|ZB}tOBE6&7 zP47Wfq$sC3Z#rslsTZwY&b5;<8R6k={)bh6d=8&a*{y^8V%cAJ*!xydQBlwJtz=}) z>6g3$TUVbcHQ^cn(-nu1WZa$2ljJ?GgP~O-$ z-C|L^Tg2JkbHAv|TwO=UGLmF-{=tI>x9mDLTH;AQ*gl>P{a1?nue0^%jW02*wzk|qR6SJsANuS6*=c(ZZdTF^f7LR zPuBAS&^RG zz{AI8((HGwqyf(PbhBPoF+*ut!NYNsa$XLkgJ%&C0#jISvmGqx{q~Bp81V z<2Dn~KH<7BGrQ53hGYXF>h^bsol#RLrV#~wy}g&8DcAryWnAkNPnxZkW=cJms}keL zYSNt92W%S_9$xw**ZSwXxy0)jL)`2m4W)xy=y{v+Y?CLB=u1hJRmVuPEi$e~z{O9W zKIytlo=tM-DW~>s;j6#1P<93wz3%MM>w}uLg#Z#0?rT_;=Vg5a?%RBQ_Qs>6q$E8f zqqM2E)*4u%d0xPz<|4|P3eSlX=bV-S{G?8ux;sD!O2({WQUXHiXP+pqe3`Q9=AB1R z-A5%YTAI5;$sFUEITpBw6qSTmpZPWg6O*3)WHu%^{xl`wf+Lmkjb^3R7pewVu3W(r zzXXaqymiwihlWt-RgwcxbaZpABu&dB zE9*+y+lwUbY&8fA2(TS$=3@^BpkUqgrLt1-{_?`O(s`@ZG5qor(L%{`+CRj^#Kii? zr=(_OrHB&_a}!AsV>LiB4bRT9C1FZVuRSlr*^iZ8dVBr}fuJk4@`#4zCbrK>@p_dI z(f)7WlvPy5?wHf_Uf#3iaYV%Po*k(=+0VJQAj=>A3@qHAzuFJZh$0mII^K|yleZK) z*=}p7Yi-Sij%DxC($cdZqR%C(r^?C7R<|3^ zj`fVCt!9p>hHKZax4+PT+glw|8n2zo$Y-i%Wb|Wxq&TgMbmP`t!N)AWqZDrrjM+n4 zg6>aTLZX!Kla;mg+BIvCl4D<%JfSho(vDW9JNjn1HddDMaA2A#8b|AuY(j6%0a$n8 znF4EU(06lJ*P=j;%1;XX*md!hMaNQ&Jg151uXpz-QOAL- zer{xPzA(eP{v<`7OIc42zBEgV-^7!{c$f)F6~qaVxk zNQc`%1G^#fapVN_u}S<63p2m?&6*45hx088C)H8HVC+5ROF^3M~*yLxv18?X>M+AZx<0273Jit%G{QopO?2` z!v@#!N&y2NK0e#Z5I%Yr)G`bxfwkhIyw)17_`MRhWh`Z7KQ`*QnU!43yk=$f_cw@CaYr?|ep{-b5#*49?-EK}S0DYKVIGM}-$=)Q|P zB8i_r?1~(;iDxw^`ReqR#)1%-!~_|qa*&xhJ~sB^rAyQ_G?*yttFDol6ghW6@M1-%eZ)Bv!l`8|^zJ2TOZ+>N_V{B|ZbSqKqncVrh zs;V>EZKVsZSq0dbHhf zAQh!H=zM{s-kLRQ(6H?-!&2O~b*p(@qVJ98lVRO0-WQFHL%CK-Eah50G+7}dgN7Cs z7TvRH1VS(w5iOG$8~;Q{Dx;XCEo@qs2;k8tQi6ggh&txEk(%uiS`1WJgRsz01_}x^ zX0h{qWzI!uvVP1e&M1B{=V^)4OT6f|u2=iEehgFfIhb3*l!lfT`TT2D)d=8Iv(ky{ zgH2b`3i<6vzhUL=JSKfINsU80Q^vKA2pS5VvP&qH!j5s{9$viQk}#THM%&~?Wzq7A z9qZt7&?-2=w`?fcQS{pD63UaOrzh58fK4}gKtY>clj-W78Xx&Uyw1Y zT{R-FBhR)XSoU)WAKTu&`aq5b<1@dy(1topx7l@IG#P~*>(cb}PUxOH=Zrs-({e1# z4Wxnmp!7Fsdcj zBU@u#q;TxtOwTJ*G@Wqc)TvXNnwnR?rb}K0c9vSX$ZlQ$3u$2yi7P0`_|tl)3M*UN zEnozGgj&2aGBxcF;k&{+w{HD8Xf;+oUDiW~l#|qeTrVXjpwYDh z4`gF*6&)Ro0pa>V{fByWKS^v3lo97Tl z=@4lNrUKk*!}Ie4Lqlff=1VE&>^iS@p<%G=a7j=}BGR3oHk)0&inh;oZlVt?>IAmIYVU#xk-H3V}|aPT40o8U#z!9>)XVbt%lenro}y<^LT6M$Ds z^Bv0^WnMJwa`N&#W+H?Oq%`rT|2+AFezYto;YoBG*RQwj{qiQZkHAdH$oO0(>D@6d zt}pfVXM-|mj%ZR)QwR9@`GLJc_svL4P@eybf*v){%Dj9=S2to0>5m^jBAv(X;kFf+ zPn>-P`me%adOm9;zXqB_FTNC$!hj|Iw82m+2eLwolnbFR-@t;Wv6Zihi znLF)=#|PzsB}(nf9_-AE?CtH%aa(qMdtiMX7ogxs!T9Nm7i0I3N=39J73C z;zwf2%gLFBcY0YOs8v)Rn~akXgb{baW&bBLi|=<20m;d-Jl9zKt_iJ3M@L8IEP?O5 zn3y{qSjoyzfl&UPgfIK2#MUbq0xR9Sckkw&;3rQ^w7wZ&;*k!MOJ=~Kxq-n+j$l$g zpJm=QH{!OOKjZWg`OV$kJzg<1MDQ^2NyRED7#QY326GR$WT(Z%#F&g%$JEu;d3bo> zUyw7X+zrVZRd3#O#s%pD++L}FcJ`7qX0N%YB8cgP{Udy@g~dpBcU`WvF(eO(i%Umv zLqM@W0LWwB^nAqVLT1jI7Ij-Hkc&f4IXKeSrpEaa+RmR(@xNDEUZv6rm^Duz)DlPi@vnX9eQJXf7VQOKq6f8v$;rtDt~OzH 
z*rL!GPo7~7KwmI5JOVCQpOBEI`O*MYA}J~9lAc-pvw9>@jH|h!p=zhjmorFo z=wmp`epW?})Ft_8r#`%U7jKZXvPzfw$#Fgpy+u4w@YpR3=l1Q}5rKIQtFa7>vo#`ZgY)I2{Lj+*Byfu&=LB3>Oc(bOj)|uw5%lAue=Wgy9L~;D}y_A;Y z+?U>~#>N7d%KZVT1dko_-~9~hDdBW@&9UBr0i9vnTQ_gMy82aRSRUEqx}6;@?TDhh zypZdHJszZDXcK{teA_Jo^9uwN=)jW{pj$;}yD3!@)&?us+s}wD{luyejOgj@J$9*_ zin~4QiUyG5n;q+jLtMv*zU$7`-H$`iJ@e@wb@YAo=nGm9BBh-aL3VQ#3mW9lQn+1v zb&WI#D&2w{+kplmg9Iswv}C9yn>bh~j00)KA{y;gf#Ko87hbyq=LDqU{nwyB0H~O? z=A03H?CV>dZP78^;Z}TgQjNmHiUe~8*A&@U#B3ywM7OSo>8qc=P~-EiN*uE1g7gflbiF*sJ^Q4%7`Qm zfA8u-Wqa4u-o6B7lHW|E>M(!{m_{fW2@6p~>M1D&W%T69lVGsUoblz5pr)dNJmaVerxC z(REQ5vaEFk{4OqD1_;fx1M5K#4j+Ux_&4Jy1;@#LYDPwmGROfCb_Bk75?%+R;+DO- z_;E+y-mINGM!l|2V`93N7U#G4Q7OMYCL~lm{#gj{`{tfq$E~`vJ6sK2=vLO`brz)v z|L?$MD2kNK;|s_I%x9I@+1YR1xE56Ho{_3Y{Dq2S*Wt3N22_+qb1U zrVyG?%^(Jnl9BC5PCz3Hn%Q6(!W^JIa)SeC=^FN&Z6Y&>OcI1I`*hwm#uN5k-U8kA zwZM$T)(UT@pk*sUPXp|f4b28@_e%+gaM!NAu-7Y5BUW8nS{BlqUS9I}Rb-?dmOomS z+S*#TxxR!;6@hWFv3G@$Dt~=7j$}Wdr{?OYpzuJGj`1JF6wc`lUQ?3A8w_*)cmYC7 z13V;=ra@~vp6lL8Q1CqHOcEofsMwZkoj?a723IIQQ`C>Se=K^$q|pv zN~5ubVdO2Es&yyP8XrS61AG(|6vXx??I9)ee2SYRYU{`bNIG_dO=pIcF)|(K9_k>M zPN9Av-Sl^M?yV0NK5+uQ6A{EHwy>`djt^Wr>U{T0uRCdv@a6=!fh>aZ0-_9D4+;*L zRw-#|d0|ymRcUYf$+z1?NpEuDH4bh%Iy$JoU%n`ttUq>=Vz|907baw9j!K5R*2B7` zVb@j&;q4=8or0vW9}`etxPMP-6M6FfR+lsIi6yB%HjpLHj85qet=CSTT{p zPU?{(lmrEjHLMrZkiwj>V(8F4RcC%A@Pz~g1x*Br{S6c_#%k7M-r~41Ezj*Jid2GV zkc-+MyWR$sD?e|um7X`TXor{wX0BM*1-{K#t5^$S65 zXPv%}uEzV`J>L*?{pWM8>E~wnG4PpAKqbh}$&ry3S)8aP>+n5^_!(+?8Pgiz!N7a@ z1(+nBjGFo7Rc(n+nI2FO3n?h1*{*xNd&o@;zzKAjvyH_|2g}eJEiEm<1(Dc?miV1m zsyf!pSBweqdX@_UTgDOts}OXTQRd1=vYTI?N_w&SP}0yK*Rz+QPmvEiQr%vRluu4` z(q~`$HBq&!>z{y?PVQkUL?n_ZRivQpv+}O1ZFy(!gAcKp)e+(1x`xW| zed_6|ST@(J*wE3T7t*f%9Cuj}ivID7t5YgagNifY9lRGPbq*UL<$QZhl5`wM0V{Dx z7E0--2w@?1cEzj67RAV=%}U&l^xxj*Giz3JiqtK1yhpyDd_x2s>#}Da=>JZRbp$C@ z7mN36vwWPBC}wx;+W=xZ3+Uj!B{J$V(mI*aEdJFXvM6AP=La*umu5iLhn5HTjCRqD z!vW<)%sD^rL(eBSZ;ND!7r#VE1JkCby)vyg1$6~5-q^)4fkaZ|wp>IPDe6{)U^mSY z+_%pF_zpS=eRw|jQ7mqJSjDzLe}8oLDcV{62x|I80LC1nN_r^IyZvCl!H=3?Tl;Dg z;w0@4!2ED)!xrrq7j8_LhPH!O8Vn1}zxW!UKG4 z3%4~E4<0yBF#98vnxp*JZ-56f9qQECP$| zV}~F^Bz}z93$0es9dN06e_hgdUvRO|M48}LXkDV^)E(?7CQ39w203Vr&pp}hHYq48##8F2MG?R!r%4rhnbwt9Vy1E)k4U`hx5kV?Rb(bDN zw7h>%^CShb0*pd74qJ67JpPEHMP!K}YN@AQW@Px0B??Xjk=;3d{5U<25Ag1xQrKUb zgP8p6ot)$?#l>%OB+!Wy0Wgsh+=$#1)>NVD$2`aH*UTn zym4-RzCt7Su*Tmmm#mFWw$;fRoF;Dra&nFh=>!ntouI2p?^P=TrC04qZv~*{PfZ&HVN} zDdEBG!taFr=oDh9?0UbP)C-jeE(fFp+=+|pL6ZgqkNAyMNiv6Q0b3d}V;jVDSWD2$ z6nS}h1xA-W+|8Cogq0ReFFK)Lp&CNevPU+;{fW*G^7MTzd;0V#w_(`_jo@l8hw&fx z>Q(LRrs0vo{r|+(-3{vT$E|H5kBZEa>-!I@#zRFtcIc2yN`#}mz04vs!>+Eb1cfL2 zmai|p*~s*v_>?JF+&2ss1$)yXz~fnC^c)-tN8Ln_e#b(6jT-{2I=w2K3du!gv>Su{**y^WLU5i=vS-|*9Z%GSisERXzGfj z92H(VXl|sQ8(JSQq=$77NL9gd#WdkePQBr#T$PCBp#fj#v&V(ai`NsF)x4Md)|+NS z7m~3=p;8R~IGzux2QoXIj;7`=xCFph=<4c@86T%6lZ3&g!+9JAl~G8|!$jy3ji$NS zY1%yLI9MvQDN5mj^>5!wJmZGf_KeQ8O z)lW-dxcM9@+JTu#8b`w8jo&YH{5T%~tyYH7m>UnUfR;n|z?y&_pln48b`A~^Fx4U* zD{jrGS1+=%0+b%W!o#nBbng4$U;!#55qGd%2xuX$Vc-hR;sq;+$Yt`{j57`4nWWbg z4=5FtXY4kejI{Ay+0$}0;_s} zRh8JBr8o5a{Fx1*)JkKA`EMK4#=U5{(cPOy@ghO?$Z!7WR=tvO9vz~(AEfIjYZ@OH z_k`;SnXG`WO*QCeXgm|`imRZCJTpnp6Pp_3&PNlB1ww+v@8OX;s)zj~&$vdzP( zU|2XczZ5PPX9Y$|WgN&4ntwx0M--%an6jdKwpaY|WR)9V7zkgWm0R*!QElAAoCnJ5 z>eOP+Ys~Y9^*rWePtFGd@SV+jTt^{jzyoa9yQhnW`>^*f)gK2Edvp_RDlBf;Q?qqZlU@!`Ln-b6LC${t5 zLc(GHgg+zrrqwvOxRmF%I>Hzv{Cr2($OtDQSOd$arxISf55*UQ>SLt09R?P^uCE_0 z1>nhUzMmDgs{-iOQwLD?L-@^D`Jg+e6&81HX5d$dkj(`paj|OOX@yNrk9p}55W*<-5ie5nm2GX0)NXW&TcyZZF!KsI4it& z4t65hP=ppQFFAQ8(B0)&rh>{7J5wdP%zN(W*d-4>*T&UFU^CF5cpq5ga^^sJwoZ9f 
z{1#>^vHE`{TC~7{ppHXGwdkEN5lZ*u>vxBn=RFYl7uLm+t-(FK1A?n{LmgOp}1zgp5b4FKA7 z!ZT-N9_3kB3WeN1CoL^YOM6P->jzx^sU((mM@(I z-A7Ynbne}|yZ%X;vD6@b2lmpoZlSH#0h zDdP2O;9m=UO4-#}w3l>0p9tSNqfq%nDiK{i%D;VovW^lMm_E0F_ES=XfsIij4D8|- zc$?JdG+L6nx6MizcP|k$S%w_N{wQT=aS=Zxn@-l%@vyUbVO90_SX*((^mUr`RpI|+1b!aGcOtmMr#Uf%C3A+SpO9+4l zntF^Ti%h1*4z5h9AV?xd?xX@Ets9_PuW<>X*dfqoxeVlfSM6&1VG%Bc_kKi|?K zV7;~cIw?K9H z^>t;;mn}-==asG6K7}d>TFF%h6}YIaN1?Y3^D$Hz}m4(eQ)>nmDoNf0NLc?ACS>Qz#W%1@7~ zYm2{A+XVjD;P(W-HrN^AbI8ZIQHtnEe{U;$Hu9GC16T?v^DydF(1}~=T7=R zm-;AAISV-u$eA@ef%{8(x=Q&SgaB{QQ@_n*2gqaL^0 z@5)mqElNzo8;O2KF^4%Nixuf>kT*dK@t;EDgI$|C$pFxLiuSx&h&q9i6g|Hf5cW{3Ku>N z3h{t{WKEHl^ebx$JM*gV$1Dqyh5C_(_!@)|CzL{Em{91u>>H;q;@R*ahB4aNB2}Uu z$kdk5va-5qTbOW;&5DCW^G3(}db}>R$mvV}yVQ&15~$_L@%A4{MeD`Vg_R-m?3{#9 zm0lY(KVz+gZT+Hf=qaW-M=p^%N~N+E8Mlz|bMW5mWhw9L=-TjBG^%4^Imn8px^#Uz zTIR#4NdCRnYR{)mPO7Q}CPISG@GkIN-M9~-Pzm2^^_N)K-LL(!__^h|KmAQGEQEx* zJ3atn(c#;S!eb%l+aBIp0Xlc!P)iO;n*Y{(ADh>Z4>Xx8KdyZj>1e>0c=EhN|JBr} zWq=$3hZam|XKg?Y;Q%_SC?y-f&gcn>j3grfO%F_Oko69(36y`!$Y?7nBKD;a3+J_U zP*eL~^?omsI*gZ_hjt>c;F@!+RH3${2mGTZZfB>Ss+SUmJ!hsSH>&pc{+Km9Dd52# zDa3=|3Qadnkuro{@NUK8piG!1CmAKF5vbRQ*dfY-6Tma+(HomAM?LU1a2=M}c5STg zR)WL^cyrDzeBdw+ZqFNoM^j%@v-I@o|E(RogGkjS(F0gylBwBHRTY)?wvpM!pnp{2#t0a${d2cEMt)osv)AUlWmX;23g9q^t0k*_;-Qn9$$(D$_-yPhntT`a0c zEg^(cvka~}znhQTKg`EgwG3-KuV+ZwdMjvsianeTVhE@-nwDuW7sx~rO0k6w3Ha>- zC&T~22v}WKT4&wx1QQ4ZN#6EyA(75IQc^Chn^Pg21kavApULoFJ0EHtJCl|T=AUljQVz^cgoyY&RF6>pvx}Y)S1lnsA0#i>< z_-7HgMI3-arGdCT-#HqZ?tS!^jn=i29BN!N4VQ$GsNn#xe?r-=8E9it(b1)Qd&f?k z5S^KU6A2yXI(X;bOOGkh{fa3)lb&KvckhA2QXnPXEWbWJd2D-I!Ba6JEd?O3KEz&^ zrdlV`n5kSJ^s#9Slz=TQtDvaLZ#8${a&3@&cuXYrhE%$+%PUUn3-K#SOx-eGw5xd2;)E>CT{oW}DA;0`#^ zi_%oST%b)u+jHXEXTO}ue}wHp9=z`?Aw)@@(6&0{ zRIcH^wqEn~D@Y!ll&S+|{dV@yH+@!D@qN{Ha)vhy?urkXq*?{yjqK0zW0jqDQy)rw z(o7!Tg68oklVfz*LEy?gKn`-Q>Unchr9L+XS;+e>G&P96^oH!%JxD^Vm6&Wb1}i%{ zmh)K64IXTF(HRvRg0|qyZ<(B!wXF6yy1bR-98un7{(*jP%5!?K`b}sA`BQH5@4Dgk z3+{{`8f*3!sCTjZrRfTxP!*+p$_$uhh(++ry)g>Umh5|=fPA)}X*3*;zegK}SEUU{ zNE0F2*CU**1`dOu@dHN%McE8`8l`QuTxQgp=a z|0t;;G*sOg$69`z>9>tF4lE<3^Fo19rP7~ zV?bHT&wFvqCntvx8(YxhkLs9v`7Xp-?Ik3HjnnbeYa{r8-dM9JSU{>iCCy0Pz3hWe(H6WCB%$8j9M6ZOo^%}

U?YWH15k&A?B*w?lP_dcW3JO9184S)QqK1(7@6&lFW)s9y#qP~a zFS4Y5um{0=%YGq~pM!V920|_8{QP~x_=R)JlkdP_=`S;Z0OVZRj1)+;VC|V{ks#{LuA58-oav;aw4I+~ZkIt-NOI20Cg( z6NTnHcJ|hj6Hc33lU?SGT@vB1Acn%lm;y5rZee7-?sMZhmum$awsVpJl%NCFNI1`g z#a+5kyS6Ij?%iuD3duN0P{R5>iNJN%|GgX5p}XOVEQR@uE1akss;c9)5&Qe5&Z;6c z>Y3m6bvQ`465{waJ|256$Qa5>4a!tzOU%tPub$@nA>oU5xA*8|6tCs>rD`TVPxM^D z7C|pr6L4Lg$(D_kC4A?|f2eL2d_qQfjp3n%YER8VlKf(I!_`57Dv?9`_3|R3GIAf@mUA4q#GIWD@&P1MGfUXdnTr<`+GxGJ*Tn}+O$1;&05&D`Ss-1S#M~5 z^}#Zb`p(16-B_J#YJjGF@OW-s zHO&;1OVKq*y+Ih|S0W(UK<~7m>yaZ^R1^PGmm?Ds45Of6cKsF zTwFuAHo2+_L~qC{wv--}86Fk&dWO!Lt&LCpZb?-$b@-sy^Uzbj*ez8aDQ zP*nzcJV7~gj>w*zX*%n^=-r*#USn%oVQDN=KJE}m#wo83*SyUEBdb!NxhV zvlB_r!1H?y0GkTfesikgA85$;bB=U;d`4~Wd-C$kIXUY)FbIFoy%<37YNVjxg^v$t z-OQT?8lrz^ka9=S)jJ0*h5Zi)=4Z0xR=f*JQ|z$^5>VNQou9w!UzaBXQh?KbZo0g4 z2_L6SVdh^sm3<_qvRlLdG#+g{^&s3Aaa~ZTtERshPxepm>-%VF8>g4IzB_$xr%BNW zzsro{z+vVgn9CnO2|~t(i_t*Fp`_AyHGFjyK0XAlKrV)0q~t<=Q!8*?h*~*pR}(`m ziu{)GkrSM`yXSy@lK)OW$%-*AfAvA*wVqZ+;`7#2y0JJm7(X??2bzxwP?GrnzrCja zYJPwEt|J^)--H4V7x-Gf`TApaws>VICbDR%b99sena*^~(|9D;`RxFn`t)OvHWJ2L z`;BkrPn!l=(|GNLwizCJ?aixv#GD8+!p~S?0Jg8c0f@t!*lBr+=@C~vUvFmvuR*C_ zqp(QQ_YNUGk})WHO_e6r_yzSVANqI$-j8f{_+LrvpSIc`Cm}`3?v%hp9*n`kfKqE@ z9)yHJNCeA$aO#9|9CQM99v+yN18LS^VCe;#hy7ob#eW%8?5^GFrh^}Iy%2N{!#-g2 zBA5Ng=mwH9|C6-1%sB$FG-#IOrKN%%c?%1Nj~)f@gh^SM;Q90R(b2_s?6r`s|8oF% z>)!$3H+W|+xqbucY%kFQWj8!6k-pe&t)CE=jfFwFI}p~42+`}EyXxsVC}5qI8Cxlwj*E+3NX=vjmr>5i_}2e{ zwvIkgv6oeiEH>vjY1eFOcH&>QbBjoD1d@I$8+{s%I)iooFzo@;Nh?=LkNt1qGUNghhA-uvAW_&5-A zP!xRAlxP2GFX=)C7v>Jh$mY(G=q$`JrX+5rpc7cX8+p*i%vh8@H#f=nbhZz&xLzT} zfq^8j58_I13$>ZwX{%f%6Q8%WIdgY6w?eX5rIgomiQIfF)!qF*E z*0m)lo2092yW1h_hR0$&NHMo4fVpTB`@{#rP-D3buVbh28(C1-L}E{Hpsr{s`PKI$ zGjCYy8&{7)L!)(fyR>oD%HrFpCii!Qzu*jSQIbJL6YMwstwYEE(63|3TB+HWW< zrWotbl9Sl%eNFmHqP?jN676!?J$n#Nv8g~1S5&?BdqUS052kV#ggm+8;<&d?CZ0lu zagyZ_vDOb1MU)BEZ|?Zz(?~-9#brSpR3*BFePwfz?(BoR^N)<|H7ZvXi0IOEZcVKT zV)frWwwimEkKx$Hk4UZ=Jrc=vm}y`c{BP=(VevEDtg!3|-T?9=FetuM?~oyd(8Lv$W;sW?YFBEBA3bPL265j9vf7Sy;0k!WeqEH82jc>4+VyYCBlz?k6aSNwjXxgp z36X~h5szN{fe=U=%wxM@ZX!VjI#__#Qn_=fQ)s_6In^eQ@&Htse_L?FtKRQG_oG zPuZ9uY3d=$-yNtAfHX9d>ouuIoqZj-nhSkXFo$~^E-D!R7BIoqDcc|Nc6D{4$v0h* z&QStd zDg#eN{yeLBWd4Gp8xojJSNq@Q8ujOuvlLF_gOU~X0AhH}l#p@xpJ|(?&xPGytgmlS zGBPqIeSjdT9jMG6Vm)!CWMbLX%O9wP_m>9`_I3#9Qg8#lcJiAJRWR7vLep7>T*wh5 zuTllCf*ew5o;y-f;#j%BnQi8oFTK+CB+Kym{H?rWa(KslJpFhvORUGIYjDQBtvoyIxGWEMN7v%mEH=vgysU(S~kWAN1!;ir}Qzyg%MgLBJd4H;b$>Pj$ler zABqV)f97UYmHiUGp8n}-D-r*BBfY++yig;-{HD<`-VoY^iJ<<7m2V41MPfR|O-1&4 z&L6mBdU|q1lHc7uedG;|9)}NpApw4&e<()H*=9=VNl7)2V14EZ`=*UGhcR5TwAqjo z{7bVN6MTaou4TUS($aBz{}O_@oNGxU5X9Zr!g%^vM&}|wM2~dmF2o#4#vwU@ZOX|i zco^k8)s*aV`SAARwTJQsW`}Q36G>v+P)#|K_%ZbRmMMW6wxDb&qx zu~jIXS4ZLPB^o|*Sef>V12KMn$wft`2PBw@Y-)^tbXApQO*n)j!>A$LQh7p2%^Lzw zZod_}f5hbuBP0OIE~;X?h|6K$D7e6r0v`mG{J1fOwg#Slf$=P(MdyJ!3X>3j<{ZjJ z($B%?+d2Q;@;u`faq<0v)cN2p6#~KC-KE-8qK1W1dwWdGvK)));Q06kLqid4Y~NIs z@Zh%>VXB*q_o1?5a@qbJ35okp#gQdI39&(_) zd-P!2d0$&mMJ251(b7`wfyvls-2L5_gxf-VvI;#g9dnK__cC=QH^wOXJl+$UOHdL-69>pUg@Pmz3 zS8Z@Tv%5lWGqKUs+;$nRB^Ln_G2wh+=fRUlo&u)c3;ZI3b6>ycX&g5>Xpw>Ceofm>iUTJvhvBB*!1 ziCx^&JDlhogX@&(bx4Y7^)DnTHct2lu^c4F*J$PP7n+XdeEW7dAYd|0MdnEkqiq0& z*J%^MFs}A5n`?($1lTsTy8GDVo}{suNl$XEyu$synEFztE0=@ zUhc}=yt%Dxtz2xoqGWE~>PMr2YojyW=Ci*a@8eV0o*=wT|B`QnC$TL*We1>ka>75O;z=6AlI~j*k|w7trklMzm?eDQ3`7xw>ud1)t{Ml3{~8p-(DN+%Nh?=%(KBAke1to0YV;P7AK4G z`dwu1>X?|A|0=s{$gT{gSHq6{R)vFB4GvY+etmtU?8742~NyQ>?cPJ1!QG`QEPl>MPSg;`YQ%5ld^)c>SB}5 zfoB?JPMt@P9j(FPlkA6UW8Lbac242_HWi zEQ*67P{SZ}X`8A@f&y9-V@iCJ-RGupzz3U|8g;tvQ1Q=2Hig1<6;G7*E 
zjq&v{sopn(@`8#Aaj9iAFw)iW0*U<4k;lHL4_~2&0!-gjU|yBJ&|~>sUsaLKSS8W8 z3gxA!c|*+3VfQVUv=c8_X1i|+uQnY1{9I__^5HweY0C0dw^2G(@YvHR7Ot3w1m~*H zE4?9qgs$F4c(opzi{8>`JS0N0iV>V6WZfJ^gNZ`71B;=DHv%v;sHoCtX<;JtSKZ5H z@DziGVpbixjGFQcKyhZCcKP+GNoyGO)Zv@qoyW3&OX6}f6Ddu8E%=KA#rcWv-zTRr zf4!=1;3s>wf*ymeN|*hlx6L)wrpA{6M%?oU4{;PUEAXJMdcMO#dCP%v?3gL*74nCU z;M#~(hnrO9FEP{(;YXKYps;8TH{oyK;OxdEC&x!ee<&BIGaAe@87@j;T+F{$U*F8Z z*-u9D-G-NaH11W!_|GiBX#T{6QC;1V)9lsUT)5g@1HI=SbG3N*I-h4THq@Fp5=6SO z`DM4_qej%_!+3(w33MWB;+i}c)T&fXXh^>}$n zspQ+Z)zzGQ)!7AjwACmDrPtuoS4iC;q$6q&_z3m#4HZ%^W1_s|)vh6jBz6_)2#xg6 z5u{-4qBvq3@gvjHjTdHKd3o5Fwa4pxu(UHU5Rva54k2AsuZZ|0_@SDOmzURK_K~)# zs-@!|T;9T@q{GZXc%Sq#8$CQl#qWNzZQ+x`#}BZy+-K%oSfbjmd;XDL26HIR@lX&~ zyN3tNr6aY|da=Kk{mS>%&Q#vnhtyEh(5IUfP8@zVFZ9N!+l9uft5{SojQZ`=2l)p* z4X+wQNyN(DQj_U;=}FA9v*V~-X_O$1m3f{<(@;yRuyP5WbR&rMp?djymZp|}-C$e| z-~N6H@BBvkd8B9Hup&L<#~NJt*)ymP9eKi$d*wEqiWC!_XBg+z?o3yRd3qBH-j+`t z?8&O;31x9LDeTwspB@`;C{Iqxlut$Sj7(hXH#El!KggC=s`=NgLQQin)e@7|o}Av^ zel8b4r5bAmdI9*T#6xv&--n#Jz@6!)tim{=J4tM6xBt;Ro@L24 zj&Pf6(Ok^2uq01TQ@LiJXRb3BwVVd}iP{)fdB&#tt+)o|-9 ze}4jXb<*qCUGbKy(Ug4ish~yMOiq&;4J&_PhoWS3buDq+3$iMg08D8PQdO z-n{COr)G-}c{#5oT!>*wOUs;&kdSu;{M5IwGB5D$35tpqR%WKhT)4_<2{ZIE%HbP~ z#Z5%Sw?5K0(whkiVu?f_zj{iZyIv18Q7S#R=yHPZ+~y@i?XuoB4s2rL zWV;=J@%%zp8ew#5A`XaQQAHy_~Qlh0C;)%m$ICSiNHHm?~eLix$WZO6;Jen9vt6l zs5Wt}PZ7-NMMIBmN^%NaPpRy(4UEvofaJ_y_4Ez*S9qG-Dq~dTjmsUik2am0PP?r92Oen(C&f zW_cbeD_adeYZ#Rej(~C2HxXqtbhQRtiysnAl*-HKnVPHvjkD|FY-xvtK=(LsTA9E` zU$C9v=k<6pm^iPLinBakqP}>*JAHqEZf|wGw9ltLKtplSJ4BJ4Q5YAEd@7q8T0eld zMARWkQu1p6t1IO0dVF|^t%UmSK9F)eku8eSY`$LI{~QXkQpZu->NGdRQjVNg$}A6q zKSU+IVD$_^kjD|MPZTtmYLbA(B}1N>S3%}Zh=vCK<2!u#W6=cQYR`N3l4oX~S$-UP zkWudvEfRnyoAG}F^v?=T53Sogv%ECrjZlWen(n&nhqa)RqMivRub!qoiFDvx)YE5Y z#X9ccoHw}bx(%;Ze|s^KdZ5(WxCK6ZT8;0rcnMpVi_6ElP8t;_TU&n zn5|1w(-s$S15f)`3v;!K(WOQ&-@%a3Vp!~*)WC&>NEmvV-d-DXb!}l$y7rN0z~VaY zBbg2^E~~W-313f!&!277S}#BuT-Ivlx~i&Y7*0XFvDuRwEFwZJagZM?ETL3VKtrc( zee)?^4BR40=bc{Hckd#3w-cGmqj;zh82RAggVWzQm<{8j@!?^7KS>j|QCh$H#LC?l zkALqB6$}{sI}#B}5LM#k;f;97WWL0n4X{E|UqGm#+V8(Hp( zh-ATAVC;Rv1rBCj64YU1Lwz>#okTu{et$pv$cSHp_@3{wnX~bjS#dcencF%z8;n!~ z-bl&F=9gz>WyX#s?JTU2uk<8Ihh}9TQ(_j@M~2q+hTaI@QF*K{-VDXJf!wEDsjy8i z^P$6TCSJ^W>RS!I#*(Y8_-U^QJZU0_Vw=8-?<0FAy7TTR`eME=oD{jkPXC0N6?Na` zGY%^5tUB=7}r#HUZ`X^INT6?WzClF-;)?*OE%J1jHd`lV(L*29O6T5TBK)+v7mq_uW1qX^*u)n6^*%smgEFXKGOU+RlGX`kEvyO+avt=eo*%yD>IZ-ozLd zRXzEspk5}|*L1OGPor@f9m2AEV*5@=OP8wlO-s| zpaZKLes@c};&5@Q+xB4fUtqp&r(lqRH^R=Ywb?vRPG*slXIBpBKSMrsfO?(_9{+2l z9vHdT-RFfs^L4-dMkpe8!M)t#6QWfm(jI9VR9}yT*&f z>-QeVlDIN#Wmo>9p+{#&69AktBDiBKUqC)6U@dnIa#4U2%Dl94I4rG@av?HG^|Rb-ShFMt-|)es{=q zo`cOY>G(~brLx5%k*5{H-z;Ts3<~sqJ@c6B1a{H%r}Kc5VgdU1+<&fqi1-I8p9n;^ zxmfBh=395zFQO|v8Z+WW(&gARhsk4OgWu$y{DHVRVxEk&$ZYgQw*jn(&yJPvmvJjq zBmH8V;sYjExz~Pd?EGbG6HU(Bdy7CmN=?`2j&w<6^};<8P*n)w)DN_F|3ElBEiKH5 z=@9eT!vpSTd1`8^u8zl*P}yuL96S}Aw+ID&_Y}dA$GtU(C~g=@qIc9DzyzmXIcUSo zR*x{EI5hZlYxFYbx-&KO#l?kH2cu_ZvQ7{yQMA;rT2NKJT3u!0<_1KgD&#i0y(Xlq zTb6yHaY8%KaL2%GIc~NKil~jV%D|B>=jaR*X{)@3d1mWtBQdd_fs9+*HQtfy{P~P> zL-X~6?AAtek(gAsKFP^o;-!4SHjK~w4(8)`;eCW3bRku!CT{O+B0t==KQ{3FDzEt7 z(Jgp=(|*^%ion6vLNS37Fac|O-j<-i{i)3*N(#QqhnjRwouHEWx&F{Nfq$hOLHJ^# z>>O99HM828?7AEvksCIH@T_dQiw@R1R)drRL7}sYD*_@ER4H}^lF@(y_Z*Gw;H4q^hilh z$;b*TrY607Im^xl4?_G8owUtQ#Ls6B5pdd>7wt+1YSR}Pl;71NJYRaY351MXZ}0I_ zwX5I0wTw>~wX_`MY!qsKeAcC}?;cmDLz)PCcN&pR?i^#Df}YKslLHMU;o?O`ilyi@ zyqJ2I%p<0g=364)v|b!XBdJ}4nW77QjnMCuG5!1kp$3+51kXkp;DhR(^Zr~* ziM@6mnjJUSO1Ql>)JpuE?F8@CW|cG!Jofrn;|%~o?*9Nm_fn5kxc_eyWa0=fy}0Fg zB3257!fL3Z^xmfsfTPBkOj9*neS{gcdCQoq;35=y(6A%`sK;#{Gr`cQ{UeW!s{!^s 
zT?r`k{0+QyK%mdn5k~)x8sW`eZmtC(XCgm=_Vy+8Br`YV4eCK|zdg7$aK^zsD+u|{89AzM19-BM;ZQuXZZ z+eu1??+Vr%{s%kGy(yig#YlH7?~;*Ev*v+w}R?yUB;wV*UrXRY2mXQxr`eHijpEi1`NTL22uU00{7SYEce zI-M`DO;3AyMpQH?t*H@bt%cly4G6j%AU}-zqtGPDmrVYKlD2FYjC^a=Cr0o4*ZwYl zRy;c7iU;sS5DOp%lkcR1s3+d1X|s;pTmS;AI5@z-Rp%a0 z?7ekZRo&Yzx(ErA5CmyZFc3saX^;>kr9?oK4rv5wkW^_92?;>~k&qIQG7zM@OOQ_K zhW*S1zOTOV?R~C&_CDWruHX7cT(HKPbIdWGxbORUMoQ|N9xb)5&ZhZR=hgL$oZaCn zNQ`S*S`yQN$Go8y85*LUchi`HqRjr=hp?2C)^br8Jur|)>$2lYYedAIt=`ncy#Swi z44cMOpFmO_5FS~Gk3TdbF>Uh-k`zD5g{1gVusxWvNIODF*=rG`6udT5?Ufywt`C+c zYu6q;>DmI)kLGsCIS_NEsgdRs0*LVHO^r6!+*#(W$@ll=?sd|7K3Hvj1?c6#{dPS) z{?%_Dpv&fP9hIItzdAO8tiJqVaOjZZZd9^54Z>5pc_Cb#TJEZ&;~5tf=PJOSbPjMK zP@By(96$lcsxqvlCE_3K^4oeG(L6ljdK?3McJ=ozrB{C~fbJWaX-*@ZS-~A$t_v^Q zw8@_~`91Pm(&Lch=6K1-_-=J&XKighSclk9^bZ6*SG3Uj#tn!@4!9L}<;;~%WdL2q zZ?~LU&=*+N)B+8Ro&>z>xhLWaSXw_ZM3&Ac)FPsICbQ!r*{9atQ;cOa+Z zs2cONhRy>!x43x4M`BnLBV(RB4R+@(_RE-6=yG@w=yFt^MgvuY1439IupzsN zJxLsT>3%8!eU(`p{1_QFJacpG<;w@Fx3X_BvmPfktFE^#>>D{Mp9z7$b5Cc2{l(UgjY-5?z#P!ll*hsrO z(PiZK-TL$$oo2c%8=UWVWGG~Ajm`FzX69eeD1aIj{G;Ba_l|p5oVG_E@|jbamr%6h zzvP&Z2fUhbbzqOGi;3y?OR6)@i>NaTG*A*>6w=c()zK;Z%*M~PVqI9+v$%NGX&@xH z5EcUlf4Q8-L+xI86U`7M>?fbPyW@k{ILE$CZcQlZ^qf<-vfD$=M ziTFo^SKWmp{%p|a#f^3vZP*bUfK3A7hsX}m2r3gd08HqrJ6pGaT0k7GTT`%!Dk&Qw z7fo>t;-rlk(`V*Cr_-QTzw>74)2%3ue!GMzObAo}uOHMCI?LZT_=D3nN*kaanCOg&@(5CK3U-12?s z;H68{H*>!lXW>CX9PTk?)wonpzO@h&lNSn{p9&tSk%;AGE|&|S5p>vbk`nopbBymt ze3Xx^koaH;RBrZuNJ*;;X&;A3#QZKzW$FP9U7!OX*@YiX+QgxKv2MC3oy~e zB)Q#{?bZDL;`P7dp6w4D$)4UGs;U+w`pVVK_)4l4bU4lP$O++`YhlQ&FEVB<^rp0J&jmIPL$fIl{P zY{yPa49X4lZ!`nQ@I_R-Js&Cy(@1Ol3JL%?h*|9=CLtW%B_(@RjVjKSL$XFfv@sg( zpWfpdWBcFVuPU|=b#=J>#|r#@2z8PT0~@t*P%3OwTrLpF&d^MPJ`7q-G z{$d8`ZJ0*|?Mv@lt3D&p{%jyfK@7Sv2=BAG*mHUZAFOw>(RsFM+k-FPTwKTv(F+NQD;~NUMuw_5xY$mPV$=^i+WVWSb-G&tY@l=?v|?Ji z90Qp~j@fFfLLJfFRUQ>XyplWEIZR*BX9%}6S!X;wgn8zYegTg2P;1YL$b4<^*c;y^*dP) z7eX_hO~@$usT;~lm)jwAXSrRpvh>kIFf%2^^xnPkaRQO@o_pPM1c{9j5(VdEnU5}C zaJtkj?SEJSXi@^8Ngr_MJ1zEJfjIsg!}p|?LSXNn>QsVjth?Zn;Sw3JRKUEX~b!HZ^su!a}65C_H$AwLdDFmw^iq&ikMCz{oqg;)-~KtcerOmaRu!`te&-Rnl)T%8t#yC2u!zi73eDv~Up;vh_ z;`Qm%-{bdH7ZU($ePrVVoJXVzKMx+U3FL9E+gjhgdUe-ws9dmYzCtTagZ!`X>@LSP zmCGJ3i};1V1Wo1>S9gf7V-mdlO!Ot+w5C?_V%$BENX% zr>lzv1;++*gLf|@UgH5z5*p70gx3oc+*5tM zR)0@QhgL?zdlnY76BDYb0&?o=s`6JCmetr@cQPJg=2tE@m={|Xrq>k}jz9>`j^<5V z%j~VsxFkvH`JThV`7MP)`wek6JXOd6z={fSVgw>V13EzEqh;UoYriHer^?&P_wp4L zxZ=iRfNW>M$&UM!7*RahRs3|4pCiO%{z?-1U(vQNelUDZL9z;{*AKD-0H-QQN-Asi zb($-n@!Rtjb`6t5{ zgk5NhB0IgGysT4N0<{E^i)bt?>Ocd1r>*whUcd$V&6kfHrB2NdP}XuyOlD%dWaX^V z&QS9#a5I4*0g{=2Ky)(~qr1EF>tjsjX8bQk8J?(br&pB8$(8Tl-#mXFpvBnta{_aZGGlo1&hk2b z7_rKISL!s~d5qd#6faAEXN$Wb#$;-ua5JN5l_eu&ErbqaVOclQOPC)Cu`@9)3K z$;o@)O29hZ8yONr>uKT0mt5(nfM@x>w6yTe`AhUeRWi{D>FpC) zS*P{%03hblH#k=6(l2S#Ei``qe-)^W`4(su*3le=Id!d1Jb|+;q=a(Sx44XqJ3Ou*TiB zwN^ggM-bly&SOxoMxe}Mu=B)RL(SA4#49I^p_q`sxW>;cEwk*x_VMXyjl*{!l#uFv&EGAZ8J`>3tU`{8Cl+uy zdHkLkKDPKM zjXeoO-==1p)yzYX71ZtTR#m@ETc(DQ*<*qnKeYh+=;||O&#!1zN?u{kV%KLM#3_!q zUjUB|7=^#{yt;ZmODwyAiipT{)ebNxUO5K5KHeF<1$VikPc%d>LKh}M=dqaS;bCBk zyus#(7T4KY7K82TFIfHNTy&~v8*m-SE}<(YNJj9{$>sLTvzz3WmLk5yCs^T0k+U0Q z)PE(rFZ4n6DE`Jco@%o`P-hZU@vrK_-T|l>v!~nmI2!JzkA^rLg5i_|H1jJndSwV; z-xIfPf#Y%}ApuC{Npo{?%K);ivJEN&pwv6A)aq65;1BQD@TBa1u*$`x8W~Ck;xu4+tScxEO z=4^%kd2X|<+1f||oU)!^U}&kqV<0>K7GI;gHfxTo^XXMDMD~HCf^`?ff~bX9fcix+ z@l(I=od=m@dpN9}@W+Ay|37p-TjeLb2yCNgvgR{Fb@K%QiP6!bS^dIWj62uEUurwk z`CY3o&tzCVa03BpS!a7E=GGo%^38sH@db%Oa*^O!_#XSKbO1A$_A>Y0PA)JolqoVB_oOE^$$QepG3o?=#+cuU^UiP_%s2|*SyU$PfboBs9(jN-$ZdX 
zK}7mf!H9pa_S0y1;?X<@qI=UtfTS+D3s?7OKr;(bByoWc()*9Umx6`RIwYSewPypDsYjhM+6HBcns$x#2T*a3Gdj*~3#5+%7{6j;^Ry_N@wmRTBJutMH||q!)0gd(8tD!f@Ty$hgAB~>w_=s z&Z|DZDJ{;PE7Dw!PZt}t(R!*ZQOPyO?Zex197J)5j>VQx;V0%w_5#CAa6cD)>yY}t zlbXwIwLg&v071`mX4>Ite~ws|E5G0?;(O-)4LtpEA3xtcDjp$fp2<3X0Zp49u!}m0 z39~;Q_RP&j-CEoM#U0`PkCHXHENet`i%AEua?Z2B-rypO3@z)g!3 zbOlKu!O(2c?8eq7FO$&*8vBekZ8fO2XNv{~dIQQ;^+S!6#VcfztzGp-&Q}Kb^J{+Z z`-EijPq%;eFSLc_m1wGyNH7G8 z8vf(Z_5OA~X@+7iw?a+;lN0}xzpz}UVbH4LFi3$_{USeo1012DMa_QiS>tT4zH)|;DYs04RDCrY5@lgYA?mnLoe?GCXb9&W(^ zJX~IA86Z60nlB?{k_Q56e5Ikqa}aN_lX)R7uK4@I%(XF+(9*gpQQ14u64wRt5M|1d zo#JBHnME=612I<=dpa+#pg=(;a(6Ik`rfl1%~^|bF^Llw)E1WgDJo+#=8h2`+`D@u zdhpObA-qR5Myne$xjx1C4w|Mk3(X*$i$&ZCxgZngd86y5fX41{_ zH!Kv{)EDWIxt?Z4+!`vi>CbuoO|V%@h{%j8K2^%Kl>>7Afo9z?2eNP8brANQ_)WHY z6JP${^rS!BYTrsc!XP`2HsU-Mh-f}5h(MJX@uNTW|A>;Dk$Zo0e35cPi#@enub{zN zkOy)8O?c%|PxyzJRCeC6;3`h~rTDaXrgA;QOx*(9s$WGobPNf9ZoMzJ*^CkF?D6#5 z-(*kXhY7w01%?e$&jt~Ka#zIH7ww)yhONti831D<*VZvGKJn<0pGMr%<*y4PrE|CE zCf2pe*6Y^8!y%fR&of~U4e=y^90sHyW=9V{;ERo%pI5iGM)N^`+H8a9Xx6-U5G*go zG=p>`G=HG9wL3~T-_>RCQ%LqDy7D(gwqL{~4$*<~!bL$@xuvnOZ6HoJ-(0=GLQ|!^ zQ@!NhGT)&ma!1kL3o0%?Bw7B{)m}BytdKz?y0?@^Q*XI~$YG)}ZpA(Vf{bLb?IoqAz632c;7R|DLhsC5S-Hq7* zsz-`RVBO{i0;rcuzJNd!^jn~=VB$7?{O(qAWkq<{Q>u*DhiB3JI?^-rB*+!cVivjk^_3_JR3WUdQmYr$R zM>BA!anHcGRD}PaGeua6x7XBxr65DsYFOi%IY5+6pj@~!t(}S6>^UigV|?~PZ`&INpK^#BTFv$2Acz6J{fzQSU=N)L*+9js7 zo11nG@vGtuS`|5K1@fgC+==g*fRw-V+UVBHm-zx)jRRkp{Qpb^6LtQliklCPUxt%k zy^@aU{s%0S`1w$2A1koc5Q(rrKiZ)p=4-ODKh-rnf@fc+W|$4zLB7X+o(_oODf#=$ zpSp5lu3l9&>%ACV2P`7_{fIew$nor1v+4Pr!K#CbZCi71MTc;KjF(&#opFbLK%@pn zHVB4igkI2M-4L{t*Ty;Fp*%B(%7^3!9%}80d4ESF*#md}Ue>YlXil3g8vLQDu$2{P zb)HgiJGon$YxO!F14x!tG6@Ue447^!6^8cVzC4hF^d>5WQiHRXqbF0fSl*HN3&T*EjI8 zlHdM-5XqM1_7;G_>qfBbK=0+0e6OJa+}E(HdYzPkJHIz7 zR54fg5y6m!Ox?IT7346g(Cb0_GY(yO5gb1`Wg%@bUv4Gs>DUB8ZWs1lTYPPVQIBC*C19w__D%FBC~H4pXdWrB{j ztp;~X$YlV@buyX=44~N^Y)Txm6}Nu)#D^aV)`s?qP8x_G|B-Z3E^mN+UN41J06S*( z;lsWI2Xt6DKc1a$YHBbanBCq0b>KldK#a8$5{fe+gRT^qc-a4$TC2!*`Yy2+pI_Er zr9s$SNSu?1R9{>x0%;J9{jW79Oagjh4jW$K;Z{C6T4G{m#*+!TxpKX#6%>hERrB9L z*Rfi0#&x5fcSw#s>hCgdo$2&a`I`^ov$Nl2W%Uk5P*ZFc_V&)KuM=xIHUGI+#c^fv z=0P3`$p%1>+;AAHRspSNfgmgEO-&;cL4z^pByi;uQoM(*1Tk_1)irwg;$iKZ zCp6=VsEb3KP7C3&*=;O=&TkP;q9r6Wf@8bt>NNQoYtM{*(Gy36L*Rl<8IkHin1Mlz zVev{JSmMXXSALTVc@dbfvv=t0pPV1E15CYw-hTYlbk9C$eXR#UcxXf3L6S3^@Q^mIaCf1O}Kx}*FT_X;b$w@1Stp2Kp`ws^tZ z?v66lJ-U1ZB@}UfFWtu8FZA%)AdTlm0<~bL+DkB{Q~dm|I63EdC}B#s`;uh5pxpwL zX~4Do$&VvEg3JO_9RmM@h`ru%7Q*gV!o|OHmBPhox&K5=AEQl%(#Fm~aWy|yDDi>8 zgtXqBf_(0d6b>YZ7-`}c6 z&wBn?B`v*PVmsNIG(C;+tfr}ZvP3DqEd7TtY%Qto?GZzEk89l^*tYq!6OLwvuk|dR zN+f4)4ktq~A~4mfDn5x~e!<;)*4FBB4dAyV_#4b2!t7|zTy-h=r=Um?tdCpGY0tN} zm=V=EF_CS*wVYCUT<$tz|5fCF0O~maUHEU=*$sUA=A)VkT*n_3rnsbu21aea#Z6s&AJx}te~O@sjz7Ub(rt1}<3O&(mP^`DjB;yybzYGxKaBjZ zIwvIal36$kDsQ6ir$_0{R}1BCmu|g@IvL%z0c{Q-hB>CB5?kcZ=2-mCh_}DSVUXc^ z5hM{lgOUTLU&>Zh+uGJ0pD3(eS-5c{xVRc6j(-`cG%`e28a1yVWvWORj6jd)|E!ZU zvX5=_E4WNo4tjoEjSR?vRSk6gcs@86F^pRkvF7J#-ZNcxo*McKHqDlxI4kD@P;u*P zkoJH#MUE7|nr~LvJ4<3WFf(`IA_kH#UP5wnbU7hviJ%kFgkI6CV6nG5Nu8TFeN{<+*}Z3eKb zSSsG5F#^sT)laQ_K2waxe!Ji&vH%$J>2HWCc(2j|qLJ)EJw)+GdpHP0KYuQiY*p!y zzMY}^7zQnSP&);?azslP9XbdVNPdp9NcGXIp2Y1_Rc18yKCs__uU>~pg&}Ztm6TXZ z$`TE_@IiSQIWvUlsCb|F-O%#UaPJS`q=K%1#^Pt?1HympBg9X_Jumw`K}v2UM$sCz z-}FsiDUq`Ei>eT~OsWgv{V7=xvfGzAeaSBz>0ej?Pd|-8Y;Lyxhty$9DO&yLp?Toy zA-!ZRS#B>YULVCacYpM^#CX3ViH;*3oZe`|0SuutRlU)G$Fu5;)|w?R!sBS=;tJru z13^Xjb4th_G7~;oZf_*yat!1~X_xM-%UKT= zkX}^gqz`3dhe})5L;B5ybI7e!wQ8Slalh)qrlqit!mD6Dcc=EDB%?CjlGzBz&8iH& zRjx(BdRA17iyF2In=~gSAXD)YN8+-u3Ksf5zyt=4Kc%TEa{o`kgr9Q;C9c0BqrY_} 
zKp+kz^Z7~M`>C}e#cBU03;vxfR@jrG{aSYQv-I{~8YiG73PeN3`^ERaQS-;q?GG+i z0!|{$NX&oV^K?nh=UQqhfR!SiUq+>P>{oLABpQ$+i0)H1-+Xzh-RgNz*8?$ zVZZ3lPHgsL!U3L-#zqxC^pne6Z2}C2Ge}Fs2a&U_%rG44L zBS}1h#%eE8kP_hhTI1)XiM^+RV8<^}TgeZ@pb^XZ!iTB5KYE$LhmkwM7n;=l~y z2Z=z7rMMxHkj=hQ5Ur>P%0x@RK~aIew8-cE%)2|@q?_uP8+Uyu^w1@$08pWvn`Bgh zojkaF0rmdQ@0W=GP7wdkV&PKBC{MqQz`){(7&!)p3JFZ#X^v!-a~3&{EZD%R4QtVQl08R}7+ijCFV4 z=fh(twR$?&j!6XIg-#Up_f9NJd+P?I(D}WT*GuUsSfZ$zvm%aE8DQH!@do=_{5Y50O}OAW!KEO6#!(2}p{3X+5G`72dB&~k zuJ&IZgBS@SM1*2C;Wj_|UZbDnVHUUA4Hes@*At^3+$z6ElGBuNa@$>(4IpFd1X|ZQ z<-LQg_RuPggWs{vem4!&I~3;ZZkq2f{us!1Lh=pCrwGsRND0Dv>4$Qh`of)e zki4`FLdVy*uR+9S7l0OgSO*lqRF`DJF*i^A{wqEIm0RkrEB%gu;&?qd0RRJ3y@L!b zktNTjzc~HM70AL@pzQ*njmr=`PO$4c!Odlv8|U4O?aiHljxM}|7I<_}CbVn&+1t`1 z{K(AAp&K_srdL+_`*p?|${dD7twcm1E{$4W1lfE>dh!^MQc-bX9Gr~$aHE>0yH%m^ ziSXoP+__cmsOLb0H6+J;cvW}!*l&GYpdnh*Vf=~{kxY;8{L}*M8?EKnDwzFHYT%z^ zP@)9|7`O`yP`UH75K*M4?j79AT*=M`Kor{}smlLQJO4d*(lyI!L?S&M^tv=0T;22x zPq@tkE=E^6uU=_M8mWub)YM}2ycr#N1+KgI^U873TNlCs#DM-GJewP-Q&WP5#?>-1 zeX((_?+OdklMM!^yZTGC5v59B@!Zf^vV#vhJKSp zpe1q_gf1e&0lEn1nI}|2@P0(+0PXIJvn1bR0&jwJ*8QstNNQ7oT|F%TiDd|haK?Ev)A0t~-*WTK-UfGc> z%@!#Qr5ZmvOxii_+s0;O3f@#+1|=q_5P`82D!(5@8kCNm0AtPB^0XM#t__u}?$mYXK^ijE)3aNYpxt_Rwx`7U zXRCp!uXFXjAOZf9r2ANhJE&@13B;^hu)qwvyaaH?ETL!L^3!6_Zb^1VQ-RPC3OFKV zl7K4!_4)SGr~MJZcGfi*&)!(f4}oE2*VfrKHS6NNye6flHUWfOL7|>!m(!;~i(EiY zN5jDcWY$z0Pt3Y3mp_8UUc{Gkam;adz66wLh@c5JF-&IMHj~_~3#279LM(Tmy1xYx zy44siYb^WN?$*{=S(g1`9IMu>rWqx<^>GouI>Vig*}j3zp%ai-S{C*}n}(B)bqeHl zU@|FI)1kcdS#mPi9h$AdW57!&_}H^Y0nY)bb8n7L<-=iJHu__1!s6Dgb(<_?8HKjO zP)4JVZbSvefOv=k{E(LyR84D|*%f|-wkNBf0##7flUoJ)s7H@CHoT88 zhE7~FO}ESVXz;hNJU4jYuhF}##p2ySWS3VV#kj(BP=AgRwj0w4=dO0xC%TfF_ir^mkfYvy^7K3uM=SJn0zSAP%ay<6!VgV>jh$@jveDTHjc;F!eHjZ%E+bKLudLla7hhIb9 zdY+~l9~0BeuzO#`(`WZaam=ZV=C+1I(Np!+*wo$i?WxxE=#0WqaT%;8u1wc$8t~Zt zU|CBCz5SEq25jm$%}ZDNE9_Q#bweF#oNYcP2G*C%yfX-A?y~K>|Ab56z^wa0pn1ae ztjFp#a}9%0B=Hm#Hpi3_1}S517V_#JoYr$|&9ykzio;Et$-;Ta9dEc+F;IUMJNpSn zg|SAsUJfY4*$rg7)G_5T-}t`#aNFCs$tHyH4Ibuk{H=TKL*Z0}Yq!0nC|daCA3fqC zjF*8gY}5>K}n9OY+ z&`G|)m);}CHd9dae*3uJ^he^3sqN7E=R7&9=5rPTYBm~|dGm2UkJi3s&x;Lv6Lxno z7_?-Txp|Kaqv;R!;1A%ye@qb#&^my{1 z$AlRgr{<@xYG%aDm2~^ty(#3pl&lk>LHs zzCAp|62WPrvUBr)b~(DUy@l4JwN*5So6R{?@#XkgS-3hF~>$CT@)yS!tDmWmMVvbr!dNmM_VKLBfdHQC|}Ng zwh^|NyjYMND97P*D2SBvdQ>Yjwg{FmdKDShho8Ce#@?~?@j*gje{ zZy8#S!NY|7DQZbncB-no0yRofZj;}lv*T?=^Chl0&$#o=@?)IK6@tC-+aTJP9@ZZ; z7(2-|L3H9H>IJ@m@A$`$s&2R5-(YFDk@J!{h3Z9K49mp!nG>x{hi);D7;|MzA3?d{ zAsfpcmkmV_3bU3oEfbGzOo~aze}Gn+xVj(Zc8QbJicWQQ=p5>)PUrM-%?y;7gJRCG zs*v~F3Y~;NIU5#?#=Db;;$=q9p3yz|WES5*?9ibvhw1UhkJsF7QX8DsxW0B&8$S7& zks!*#8;|pl<`DXBJQd6cPKzfO;O^ZEQK?Y#^FT}?JS9VvSSvGh;1Yk1-*^(^9Lgs4 zl-tch+;b=)O5}M(n#bZi_Vxxrca(%FlE94x$WIZXJiI1vzDT8eoiXuVr$Po#p z!S7Jz!m!D6MNtHd80UgGBtfhxCO8jQhO(3gD{e}U@`QbQN`?s3sf}RdMC&k77vMG!3yF$h!lzY%D?<&%>(dc* zi=yygDz8PIf>OnJe#&^?r~{~5@&7!*kNz{k|C!+bHY+%XIr_?d{idmJ%NVFs5ySxe zT%YWHk*84ciGI)1n-CWt{O5{x3y#P%@&`j>bj+NLhwtP?47Y;{uw@LIHo7tPH<)1% zHp;L0Yvoai@OEUU4SmH7x)jQOp=ECa1@TAb?4k(G-E10ep|FuZbFt_HDQmin=314P$u2$Y^>yOp%Ry(ITG`Q5uWlgu#UMUR%J zBiBMRSSyx-65ca@hilIAez!oikKvYmI9*YRWd^ZJG`gry9528dooRt!G3GV* zJ?mA9C>o8j**K{_N;e`6mqvS??WcXVJ=tCZ-OIhJ{>!REDsS4PzhfT0UWXYJmdU@v z$>1uH!s^BqnH(Qv#QZ)v%UgV6F6&l0>FQadL)IG?h;cZORWUcvKj@S_USMH6YjjBN zyd4&g$vACSM@L6=GWC+ExBQ-$@*Ot`&oWN)8Pqch^)Rn~*iC2@U^OG%kwPUtfdsMh z>^RF+58JPKMHOO_T)V|FHZEq+Um}g^-G`IBEJQ0s!zAI-U+UpK;AbPQKyp$j*wC2N z?WG@O@H-9f<1%F&FBOHELn-kLLxnv>0@!TiE+0{QS{i2DNN%l0{^4s#NqOq3bAZ~l zIA$J}=P~N(D?gzmHOIoyH%&e2(JUA1>@Dn$bizQ0ln*Mw(K-Jft!5gkD&6rwnD^GwCLrd7V& zQar4UuVv`2loHwTP(b2y@kKZC7Q-&)q2QLc 
z_{bO0Z$-V5nd!x|kk;lgv-RAYV3Ly~2}Uh`rM&D+LSCdWv-TIx3@3`LdRhx8JUSIW zIX03=I8azrg@+z>^lfCTTNgps*9m$ubi8zz+DTo0gHv5h?s#Qz?g8U)WMVLn$uH-r zK0EF7;=qj;`nL}qTu*7_AABz7d4)V!pD3tgY0m$9m?n{s0Yr2jQGM42Lt6BVcYIVG zwm9}C8ZSqm-2cTRqln6>aRyKNqorpdTfye7KFxWBSj|pf*6rYhpo5%?x8%P{c+KLE zIf)Pv#9yxOmJx4+z^7NpyUyu@w0#O?>LmXemWLQ3#PtM(Uq9M~rMkRUxp6l&>a?El zjBg`mYZ#M3!bE+o+NTq@Xi1Dm^WuX75B3q)uZvy|CH&;};v5yuHO%oe3tKb1915IT zLM6N5pVybHPp%gB%=k-FuUh#>Ze%zM@xn8eS?x;sX@-n{ok&6vq9DDl5!{%uX{VcQ z{CeEv!pN;(r{OvhQKQrp+#+^S$nX<6y_DcuK>TD!bifVWXS$pz=uj5#lu%pJ6(zCr zPjIfUi5&WEft_)_^a+2%aA|p2dDdxvK5Jt7f?@-1kBH0eZ>eUJ@VloSV^rF{d+P5^ zWSBs8WF)!pE7tN8@qZnW4gZ>L#S6}-(a9J0F0$q%myB_4{4EA(#_rbW$u5d*Wvaq5 zqK?HSYAw7!l~!0v{85QYg*bJ!M5T>HujO;_^~_O1@1G|I-rl*jOr;dLc;A4cJT+|z z7SF@dQh`hg6xGYPvmILpm#A8wDRYS{=MeZpSRdZmZ6;0es!YvNgyGm>x!>reo1KtU-v$N|czZjzLD4;$b&xxtz zcH25278C4m6d0etxTc7%q-q&!6hTzYF`kCO2iW!*8ebCA!64rpP7*oC(~FTS4gp&F zswB$PD*GtGX(XzYN>f`H=g7G?cV_th;~NLg2|IKL9F(a+V!UDM6cqmm2S4E~lV}>I z8dNNdb3LuOIl`*yHA|QR=J|9#v=J7k1?wE%eP!ba_m=Hw?RS&B>9ic#e%;U`+04=G z2D6e(&N|I6CC8pGE_ale)nOgu8;W(DHQ9MeW&2?2MA-wt69V}bI>`-c->;9Vld?Ez zua7*77I60GU+J`ub6zUpuhPUmyp)g;ZFMguFEZffMe%dm&B~k>*lbrf&)VIYbD96*-rzKP9{LF`e!x2BGGj7OpFN#O z!@qPtQ$1&1M=?1g_an~K$ZmS!=$l1WYP(*^XSW^nTwQlZqs=*-H%=ZR-5P-(&yHv{ zb#$EA>8`17dZfG-S6csArxV*Pi|FD@A#lba#>9n#-UDVO%hWyhaw^mF`-5He>Yo>? zo91oW_RKiXm8=@5*?x1ExR=*9Tes`H5Xv+vxc-$P(0L^%dAe|Hb6}(0uIxJ%E~UeG zli>Em=~r9l&*eU572?%!KX~fGM=$LFzJ;J@^U@KRK;HG?7Mcg&UKqqVo59gYo#thB zoV&FFyk?HmSu{pF)gh;^14qQ0!_52LkF|7X*+fsReV}aVg%9=Yp!crFYs&a%^kiJ; zg~7WAW1EDhS`(i0OzR~vTfnJ1#8R-v9=BdyI2K!KF_@5mQ^(oA8gtLQY~7$zjwyCSW&B=Tloeovo#>#-&$1Ea$p0ek;9w zi(OTdbFQ9uuAZhLBC7$elh>&EWt)on$egR|QcIkGeUE8=uf=C>vv*zM{-F=NL$C?n zU^7`9iAKGw7)gEvp4_*!Ta@0g(Z&x&bQfQX*67vq#AhxEg|qKX%=6kt*T2JhF=|sz zKDyyvNuy;`No6;(3vYeedcqX5^!k3geP7&e@wd%^-KMNc7qk4nqg(x}^>Y)p25O`q z-rz3Xo}h91tbYu;x||77%0M+@jl|N89f=}lk}ZIH>37+rd2*8D82{4vR@s}3sAs$F z#&6O?^tzG-oflx~ZY*b-KbS2X<1`Go*ZAh(MHOUoPEAdnU)9}yKjp%1-Y>ImKP=~B zRmH#B%RP>}0(c#Tc`?UsX43&!Gv<7&y)CTo$EO=ycZhq;iu7D}n{(=Uk~^Yf_0+=a zBC=G^Ht;e+%BS4UFMlLDePU|rO?d>qko;(U$?I~1!0$NjBcfhvzJ=q?T6+0MWP_95{Ez|l zgKr-bD6q}mesoxOt>eZkUTQwSxw(lfk%^X$ujT|d>jkm!B1%_ml~StF8rs!tqQnvR}9JAd5?&7x5-ZdX=h+FQ`Q(qq28oatK19UW6*6L9%kvq?{rCwsT!pf}zF!NO;n@(m|4(qI*-}}}6qM4FavpTIV?I-@w9Fr#h zz{cRQ2Ih^vt4FVV3%YuAe5iutP45*4Mts`aCFaLyUb59SMF;0teb(Q}Rx+!_rsdGP zu+j?oDwJ6OqN z^X-u+3F>v4u^781uAbucxE-SwLYLJap9L*iWb%7mcd-tU_6piBJnk_sTORarjfZnE zzC@BEScUsbuOMt)88$bS5q*Ev&sTUF)0&(^b~A(7pJ;!`eM~D9e8V`r`I(DRFRjI$ zJKvZD3x_r594fm_%HFM`_muGUHGS4T+8-ZrcWuD}XxetMj@1=Sq~+94Sh__S9JU?{ zf1AtLh9U7o{i!$T&0LNA8%OQ?LKZ&fX_cp(ISevgHzZ)pPk1FX=M-DHsgG zE&NV?bi)^lkYbigb@ceKMe?RvSPe|Rh@ol2`6tYS+1fY5^D&5+z!j2SgW%7`ye!S= zTygKrS%vS|CFd583aOoBF!=tk^(9WjsQwQ;9BIVASEs5)%?b2>XSla`1QT3cVUPK) zj*G{KK<8->hTS22rsoa|Rmqu!qf1X!W35*ER`p9tgzvb_7-8e;U2EK0FEg1vDVdr^ zrb1~NwGZ}ux@2fn&FoLSI_j!JS>liQsfn4v&nJ&3>dyx}_t??2JRE`YOjC$mn(n;S zM1id`KkEjMjqsW?TM93G`$KJxr97-*1a}gV#mweVNlsgu;c$!RIL6>nQ7d)q#Usxo zv>mve!=X}b>G9#N+7J##G+NYbp2R zxDy9}bLT$b_nKYI#`tiCL^DKr;v#48f%||)(${hYH&IOSNhf_qmb`T2DH+c z@F&P>oOi1$8bxsCKd9-yp{8J)1);l`!!2>#8X zY;O142=-b&6pm{A2^RC8#y1?8?m98&FuPgZ|5y*U;~%iCFLnMS!>_Y=tPDYzzb=Mf zQEVZ(8Th1zXB3iZOm3fh=v~L9Je2B;ieE)z=GR8fu(^pM8@4b`e&n(YE?LRJy>MZv zRR7BUrDqkK#=h>M&$ACSFGYne9|t^3`RvS~w+w3hv`?QXAkLy^(`RtrhucrM*&yRR z&6D@L7=IwgKuhDr_1A9o)jl!|W8=zZwXOU27?U?uI2X;P!}>++kbXnyg9^Wr)Ej#H z0QDIOm+V<5=djfK0kHsUt3Em(7d=Y$O5Enw3pQf6c*9j`PsokJni&_qn$?_VsW=xG zX|J0D-CN?~ zN)vZw0EA15vh<|Si^|9CYmPW|VVfUtt50f#QwtvTCVcg(t1^m-X0ONb9n65JDsgr6 zS*%L<4qOnzt@a)vducyhOGLpJZD4|5-;J+a!_Dqhiu~PzB1T 
z&fBS)!7G?OKs;2~L9i&;YiWdhnd^tv27=k;zt!rZY_u6W!gZ5o!IlPHWA zP-{H-TAn@gfR~ffc&tS|(Ciw`CWHN7#+I`&IyZG{dMQ!4+b?0w~o*W@jI>N|+#zRhf z*LkPQhjI}pT9&UO+UAdSt9 z*e~~Q3;6C|FKXY^b2JK*Ew6E%WU<-Q&! zvNY_L{wo%g@Lp0#CAei(RJPjZ(jJC!p8p;fMO5;r16TObIw|W3zfg|?JRCe3AP?Pe z2X1zJCHFTpI}-6)tr@t(6j8VCG@|rKkC#`j===?6!;lrv6(=G%DTzWOdcD&XH%X2p zA4#NCeIowFTi2(OexFFB>tKF5K5jdY5Ne5gtYeJYOkFC!G?;y!pV#3tuzzC1K$uOkzyZmY7>~ zG~F%-+y(Qu^>B`jT$n_155CgPq44~?hXE$s;=JCLDJf(8^!nbjPvxx6OLIIjYZ<6C zL$C)2F@}|3p!B2$DXCDOl0~r2+(?glWly~eJBK1n!aFjVkt7OJkL}%3EP|vWtY5k>e=B{I zUMU`pRD_@ciacsxM zN(^|D02ge)E-gO%_$m1oMglShC3=i6jpRqHDUlBnLXslSg#ME;9Cx8GWEe4I7#$Uk zXM5ihKM6pcF7T941%b(phm9Tw$U_v0EIQbLt=1A5M=1*Iog_-=2@Vd9xAS3?jRyrb zroyEQSOm2D7jZMGsa7lEJN#`$b?#xI%P{C`kWBbV>d@5C@l40*J+VHRvF(4Hn<8Qfyx*H|Gf`gJTC6q0XNrY*5r9OHDQ<+8Req=ZXUeI}Y z25p)sDft*q~E; zDMlo;iihRf=ApWkJ1s-Y)neO~4Bj&S)AI*ds76x&M6ZOP-V2insoM8Q-MfC@n9ElZ5u=d)uP zKNhvLwDx}2C<(g^$bFQK)kHCc-1^5r7Z?A#u>Xf%W>V3r?fo*y_g1>f3L^~l!*$#Q z7w<9bLfR{Nw`BaJz7=1`KwW!WYU>s+jrbJW`}QIiFJ6>i@eX=^uw>O`<;|47Cf{m+ zAnOCmZ(lkxvNjy;x_7S5^)0Fku6La%<=vDq$y|P79&>3S)7)-cVt4BquApN%9gV|W ztVii`p0%shLK=-(@$3{U@Au)+Of9RrBV$^&Bc_XUYHP!atZ_@(uN74^Ou8rGh`M?A zqM6)gvBIppdxBD3qbd-a&$ z=2nQRtftW$|Nfv2=&{bq~)MN2Q&MJ;w7d>ILU)0VwS?XXtrL%q{x zZr-=#KUJz1dL&6!%`)1byI^w%ms)eF{G1|V*@< z`+a+V-=F)BeeAuD&vA656>B}ubzj4Ip4auL`JnCCnV%To(0S5A#e8$2t^X~(t&Ms^7Y8O@%dshKF^&ynX{HC8=2wQBEv znR!<0`T6;obZhtRX)l^@c(RqIpS$$7<_Jq#Qs3$Akm#dPd*55L{|K~EtiPCLk3Onv zaV_NSF*S^@Vb3n$xc@NCNl5G9HVjc1ZytAU{dlgdrY`YQ^X^o!Ci`!bWY^*zKUT+O zKGW%eOxuiKhJ_m?Cuq&~bu40W*)f`ormi6$8e`b8?v}*zEcwIh@^+LmMa_sIc@_-K1qvos#X)`k1&FXXhA zal5>$mHF%42KQg?$bVtRKDL`vc8-75ygRZ}^BvSTUG!i!PNpw-6o^T6=WX*ux6Rwi z6K!$Niu?QyMz#^{ro*q*h9L*N1HAK`>gk=d2r|t~xPK}r@e|7)tH$l`}&JdwY_o zlXNQj9wn*%R5IsqTch2x%rMy*HHN~(HR~qBJx8`Ee^g#h;cAhxdFCOM+Yx7_kVA2T zU`!N>-i_+Yduq3xdvVNkO3PCRPsg9|Xo;`mXSQTsO*NPH zF3q$_Dav2^hfjVgv&Em)c{qobJ9I%HVf>}O|76Uk(F`6z6{;GxWoe!h5bq2-s}0yZml|F zf&*^W+-D6fG(X!tDx3Pbe*CYo+;6)^e}@{j?Wa$Q*X+0##jp&c(2Y}JW6Lf7vT-bI ze1f#%_NuzN;`WB((`Y8~C(i8JFJ6+Y z=i{0Xg%e$?lL(_x6=w!4UHeD>@c48c`X0m@svhD$m_AW3?rK_Y)Y7xj&~V@TN%5_w zZTtPImTsvkNzpcQ+w%qCwT=ictyNDnBOOTXZO5Adn~OaJl2R*!#Y))p`aj=18Fv25 zIg^n{gArDSitA;6tR^Wb3?`j0H1}c*WsGAwK&B*>D$iob=$Fp)4+_urQ5O5j&B|S5 zcv!qBf1c^z%&7QQF3(`uOOP9OW`6wfemAKobudy*NPwQht0>E=TcFR(;d#BwRWRyo z+y3H7@dU2JQ?8P=eb(jMYwDI)07@y-j?cG0xnth94SMnw*#2@udDod^qkT96uz&y4 zjcKFV))(;vwVJTm$nUn=ml@30G(UOgimS!kyU!cae{c0)K3i)y{K@(SmHkpbZ!Bst z#kr_5ChpaZF9$g@GvDh9{RN&`8A>% zn;P{R=Zw`I*6IM}t81}$3nf# znA4FHg}xPR@w^8oC)0aWnr65nq0UbDZQI0Wy=w1=U-=7G1S`L(YR(9?Nh=O6@>dw` zyxcQ8QBCG#cH7Mk>esc5f4&>Vk`u`FObE55j;C7NF$Y)r93YjLI#zMm^fIMo%yy)t zznlJ|eBEstaL~J0ti<%?kjcbEN2C&~TN;Or516|3gr@C^8Ra}_#r@#5F ziMcAPy4JL4Ds_6`sQTw*a zc*}fQZuH=?+W`xw_JkcW|GJOXrc{#3vxoYg_Q7!ayeg-X&vVSzY~%P-J8e_*>67d* z#$d8Zh!w9AWdvZa_|9UE)ZV=#`Bfw`ts_gA*{nZip4AXX86EmYV)Zv$+qttIh>Ryq zNO+hJr%QXz`Y4sAUM&=twqtcNlZ+%wQrp<=iAsAukt>AX)m#w~_Lz!(uv*x)Getp> zS>#KWu^at3S(8RU2O>rd+KM>LtfWUwM}G1kRS>|o?%FOMV(^tW5L5W z%w}HOp__@w(TA^Thc6Ull+ui^O`p~~W57l=)JEvSXN=t^b4lU!grePIoX%_QT{^S| z^BkIqyfkUPB4B2c$)D4tJ*MvMPwU*-O-er`YOb)`NgjWiNDAXWP3_17X9N~(`&D?x4NuKE-&;i54!bdd1%Ubt(_@*tptk3ucM^qn@wRvp^saq zG#(|vcs%@%$woL7r&<^-vy!`nGeZ?jMl7K2k=+YwI4(?Q5!AFG--z|4ZSS`5on3kV z@PMX|pz%i5uyd;vCwyEYr=RM{Y-`Kw4Lq&?axA~2pj?64St{@$bM}G&*^%yVrq~%6 z5ZRjh#g!bpzD)#Sxt9m1c(~Z~F+#bnwLa#nP|e(5#y0e%?s zaa;G3*1q+H%Q4GpW!Eyk)#aT{+;G!Rf5^>pNE4JG_jqxneduMYW+$?;zfO zcxwDkT)0cOV^!_Va=#{zn{Ccv@&hcflGsPKe!ug#Dow?v*U2&}ce5SZ7W^snH*zlR zTRcP=zfEht+#ARx$=hi6$I*;DOQTIydGNAkM~{-^^KKSD+?{(pp>8mxMG8NBW1RIP zcj$jdDqdhOO=L_yDnI|ic&^?@#X=wmxtfz8zNH}4LuQNJqMG@%zW%-JE?^e&R<22 
zEvj7e<8TAg?!!d87d|A5YFnPpsz$%>56+ac#Y^PV2TH9N%tkZUjg>>zi}~@KkTOYq z{)S5R`NuYGd#zgYl7V_}!)P^cI&79ZnK*42vh1bdA<&yw_CM9U*4df&%_B)z~ zQ-j_&H(P3Hc$1xEudkZWNcdLXo8|k-nfC7|*DboFrnA*_oEc)RW#m!e?3PRE3XIVt z$@JV=SDV@VuOf6H$xfzM>TNszV)CamXnCzYPWO@ucZUq1O11*`K!hWxlD+!&d0Y27 zbBRy22-7I^+9u#GYsOz(_qKi>R~|6x-eS;JS@ZT@QgEokfVEu(evv4zMQi)p|2+$^ z0*v>lpG?<;GsUXJM!M;cnIA2y=b7<-n)jdbLvH8Jo%5Y+OCE|9c$#;cYwv}5$KqrL zj3^Vbs;QJ1{-=ANt_qW?std2eJEJM9oP??V@10`M-@elsQ> z<$tNahDU<9UkU|5A)A%x*~Z)ACO z-SW?us^{CeMXUbPbUb7-JnQ!1O#IW<{;4QVR(8NbF;BXoy0T#GP%5d<^1_1%v25{C z^Ps7FdoNt@);g!Uobs)aQ%`ZpY<9}`bn{@EDe2HN_FgD0Y-4RSEpCz{|Nd>1nD6sx z8C~XPdl1R$+hlqKr{GT<^-8+Ew14eA9Si=BIq?F8xau!yVmf1&}HVfNmqJ$8hr=UweSeC+**{;AZ~K^$#}m4T4riiK#k$Sl zcX&cNlRsz-JzVVE!tKjfK92MD+dccF=eNKTx%t^&M0}TE_8yby`29_5c3{z7J3tx# z=iUAz@g~{w0*ss=DG$%tAJ=3usHN#IZE-Et>DPg z6&IGwJ71)GH+9?2l6i~f>934l-nvdFdim9w_fJ(m>xy1lbkxQpJtHyGZa~@i^|}J? zT}KW#Y`TB-Y2?mr$_8KVwF%2m-N(BB*8aYEUeZ9)jo}_sm+%>x&XLktkNbzp`L4e8R_Czp)SM(cVtL@^$nTxM^te3J zqOn%L4{a@22U*G9y7t2h1vjVGfnBHHKUyytd$Bw`{Yp*o&!5J5u9H5(o`)>IKS~YT z%w<)u974OFh*74k!pz`DyPTm6MjM%vL!-#KPmP2txJlVIPmLA|D~;x_KH{_7gEd-M z;_Ao6_4zt64q8H<#X-PVem>u4lH&x>B$?wlAcg%0_{`j|&(MdYTO9Nv&&bHg1c{I+ ztg#XS>?s$c@wBtEbNNPDZ0JCqV$aZcYOn>Z=2uSL`_u8toA$blb&i+GCE*$NrAwFg z_4Q4w342bptJp|<`hABL<;y3rrzH}T_L}ZvT-w*>LuwWzMc2a?8b`IYwb=t&p;BIo z(@7dncfEV}Zjok&U0Z8{QRd;he__`(qoxd-lCrYa9A{c(j3y2DSesx}9wsMUd=1uE zf8bKZokcvUsj2gCe}1z0$gWdM*c_IoMBb@O!K}o_44bAD%fiZy8*W7fe&u^D7!bT_ zldNb4*K5|u?4m=hF)=Y+*u1h-+9>0yOM}(3up4DEVX}@_e!V$*X?AdMuw+lp)xOFY z7rTM#cQ@z_7s-qk=iO&MELk`o*7l%!&W4B?+BaT~^TP4)*dc>G5qD?%V*%;mmK=r~ zEst->!gb#>zXUtC1%+mlg7wpms~jp>U9lx8QYrZ@j=&2zJB&2{Pi z`J&{RL^&tFDbI~jpJMsV&k>)Xyy@EPWF1bf6VQv(-nh>~)xyHU^xA>)U{Sp>vmU$S z2?i-02kVSi`$t;J9_CWASQ_cFbQ4MCPS@ewZQHhW<|TjU&c@^KW0|MDo`#1j;ut(j zDPcdk;SZ~y`-a*T2a8gt3M9r%%{-^Os$#WDlw<;~wozt_cPAMqpF7+e8Kf=SH(6)K zDI$3Wnx$Hmgv4rvcIVkPq;}Qlo0*JAq$|N|)6&v%sS9b<@2b9f@$mTJd4{0eBQ{ML zr92ZM9Tn@>uNM^+{YBP&SRA%UwS8;HTX%9Kf3soNJ)V;~7cO)=)|oKy_<*;<%jeIh zI)g~;v1inuZcT6-e?{g_+9Z6XTz23C>*XXiTN=)x&A+Xzt6Q09k~6j?coDgXcj0{M z?2w(XOJ~5HS>Z$1USpNq=z~ia&WDXlnzWg?&@lUAQ5Suj*`rt$NAfSM2$pqTC&G0y z^3EXUTlu)R$K*0v-FiwxGwr*Ivu^Y#PIm?^SiJlLf~uix#cTN`S}BuFZESkXGt;wF zPmg-0i%a6zk>`&6)qO2F&YmLpC&H9J!zOx-kxSCJ?$s{ghrt%{Y2JU`vM*rp{k|6v z7&!J_!0u;+5(neXc6Es+>?7*qf{N}p>yym~Y7M1zzql9D%=6Mc6V^Om(~O?~54%B442-&WovA|kST z_wJC^{rmT;@Q`HtqM4HeJ|RAy1wSB==Qs_ONKoZHk1SZ>)$&R8{70$Ltx-pJs*8%s zIM*)ySs;G=p}2;x9MAo)YN`$ts*e2d2m7yH_Y2Z-0tsupyeE&1x$fEg)ue^bx_Y&IP9QDnekh=Kl`gcq|x8PqYf4JKxD}l13%4Z8-xn4>961)4)+vX`q2-R36$ObTVXOD6v&fBM9`uIkNggZjzZcGj zxf+wrk9f{vv|E;=^K{o~?pdQJ0Rh*49p&qynqUzUNfOEIc=lZ=08da@Ho%?aBeik!Y-b zz_s#|-5A3sqmCWKeoZK^o*fTFkVihkkKsIz*GqOcT#P793W?CZ@cZ-aX)o-?V#n=S z+tu0Wjwx`s1Enj(=%MW6%}2aD$zd*&HTvVGX|^rdn$Px3J9Zp6z!gycSW{KXdbQI2 zv-h@(Zr|QnCVM;U6P(QVjs3-WC#UWo+SO*Z^RKz0*%>NrXZmGZtNFnz)t6eb9q||P zs}E{T&#N0D5@zAfZ()v$;Vu{M{@$O$xw#LGngL+2VzkTyw zhG1$!niX2IaExq;C|R^xcWv-Lp_I$?di0-SiScx|ej@6zW2ltP*s7D=r+;u(({zP5 zY;wZu?lVJ~d>lEDRxt)QUDl^SgGWknJ%BSJ% z0X*RnuDmH3ZgWT2dXe+D{L7cPc8a)@=V^1*#OsqzoRD{)x)#kni34{~nM@}RtXOjD zoo-i{b8A-r`}i@>n)0F|(W%Wraag^A=S3BfM|l3yL6N#SlwP;)!wPETyd|34X1B(U z>T2BDiK6>Ya`{vndBkUd&QJa~ZYN^49X==~L?#O#g2D#h|k&Ry5HwA;$R-}vs4g*)=p zJgBPRx<^zznbX+l2D{4w#ffa*yjf{piN2W!5jkf*7(^X=Y_R``H#=kFl7;=X2?81R znYJyudV1r5x|E4Z%_H7_8>U@4*&1oa>fk&5{?voE3a+E}Bfr1YnYxM7IOmVRbKa6U zY?maTWdiiUkLBI7m+9G#1C42yS4)+jJ}FXx8ZD&Yx<(_#vBj}&t-C;69ctsPc?*y@uT{U)dU@c%zV#P$b#=waBP;z0W1?YV`~)~+o$l3n z$}YcWc(}Mkq(t546)#d3ooBHbNG^CWh04Ws%|bIdAmaF{Rbspy+c~=v4~7u+T4}n7 z^rr4|_1j;#v@tf7jRhl2%)Su&aP4mI(!XCs$XS#ts&J4&J37|9EhTe8cNvPjxybiT_V?92@DDvXX0>2 
zyym-3yfMX64WJEWk_03|K%Jo`M)QsG1f9*#-z(6SqRNVkub@D+3R^6me;Y__0L&uZCV9KB>u_8MW7U}oisl|8Qoev;)PqnJo8Sy{`>yiBuMa5j`#vDwe52}bVc}L zO!<8Fe}W^Xh@Zsl$I}1!PQ4utjwynh%A2^jSkD`9^6%W&if#XJSjEw(N<`64IGYX@`yC7o z&&dMmpi28zRVZ&pR_n(GUYA@r?AbW2P^ZBj49*p7BK&(zApwVrL?ZH|dhm$n9|`ib zro3zS!jGP_OahaD3IV_P=_aUnaf7=OFU4tx;jp8|7)`#>QyPg!eb)&{UA-T30Tdb~ z^J!opL3FgiGI4pJ7C?%l;hRDJuM)JF%j#;`D;XM<<9W%Yjp8Ba*zYjdqsJa<%Cwv4 z{pF8s@&dN1`JI)KmUaeI0&F2L!KM|*Pn`Jq^XCWCJlAOEp2aZ(kP7O-NN~is^ws*k z;5EvdbWnFes9QW3Ehy@{2O5wj%$E>R9-Ja`qx+G7^q#a_kf1G2kxqm4zoOLqQFzuQ zO}BsZxpCu0)TwJS_cLKhg5*FK@azmwvjw0{4PTB+H}FagCvrl5|L5Ba_)Hv5rbNZ* zcn&sv>;Zjwwbxw5kc6bO^8}HcZ~gg>`$x4uS^M#p?hO!X>QslXl>(9srql=->(-wm zXU^2#&v8pHpmhA;WI z6RyD)uPuyOMw{BZy--BJD!oFS2Wp4#@cQG*WDwHrVS*xWU*J)C;OMkm=oUC>WCZ%b zdtj?x9b4Ps#&m1!n|u&Rh4>%bSK;|s)=dIplV_UR#@g5;-*|Cp&m}LGMfoe@AZuo_ z7IX`CwH^uJ!ldvDA-9~IGjPH6b}znTNh5_y?0r|^cBSM|U$bELy{VN`zwh#X4Z#Lv zlAK91&RV*T7x&t;U?D({Ms|6QsLka{9F`tQVQsn0n58~k2+aE3Idvems>({w=@K*c z;6c~nGB&h5*cMG<87H2i>9Sn<`XSMHEH^pi|y>&v(8 z-aX(eK`{VZ11F{rH9ZRpGr=drjNZ!x2>EEDibPUZm!W?wj%Wg}LLh|%1f+mB;78zk zQV1d7eO&5p(+Ip8219_0P&n+F4F|78$HpQA(&6SYa^uIp4qr|@^{(=?zR6n6aYP`b z3=jxg(IJ`0X|5X3Qq~p$N$Buo6f&lYUKnLN4 zLh?8FiipHv*@&Np3Whk>)6-kMYSlyOpo$uM_H<7jP|QLlWByTto>3hj%N`g4YqKn8 z858RzdvoL*F5SP8WL#I7Vhn@nByPODL=1C}8e4RCw0^m}SnYE9wf3*c2}}kpLf;(Z z+quP2-`?Ink{b;sM^mq*t9RTRGRNb}0e@+cNW4v*NEE)+o!}+NL}pN{Oj9fhdfT$J z5i**4vd$0Irx4r|-t?sH$d_#qM-Rd=clKOEltQ~B7y517TABb14{d-`M`b%sjkNEa zT{AX~rjkq7(?}V60@Gs~2MMDLF=jbb8KD`2fQi8d$+RBswYvseax(8J6!zH@OAI(h zI(zmkoat+Z*74()s$xrj1Z*AWyS8RKRc`D=sML@%b;iVBNTI(jiCi7AM^yBk8Pzi> zcdQtsz(OekQURso;cgXuO}quTedf*4j@~$hkeXF-jzT%r)z!&=j&be}ASp%b<-wtM zF4J@tFGwGc+5=UxI1WnZymqoAq}`o95WuS8I%+~L=dre4%Qwo{cRts$1Q3QY_6h1< z&(9Y@K|!nY#_&@(!m{Q1pYO91aZ05>e%v3(PyP6@s;Uau)D?L`!Zi0#WiwvFuSlN% z@kFwWpu+1@CRo^iA`PJj#YxGbS10lav`a+81(+0>lpV`U_J*FPod2lWPE#Qe7wev9;<*T9tMVG-vv7i_zdpORAhLu&|Gi>m~YN z1A_k0l}tlJLpz9*;0f?rY3Jc)gA4oN3>X3hoXESPLn6x{H~3t))78 z{tcO8eZcag*@D+iGz&c=fw`_9CX(VIMKPm{4~R^dl-F(I?YwG>xXt3{VI@KjM_6gZ zWa_3N$3SGIeOW9_(8otwQZ|j%P^Tbp9#e9IizDt%IXO86%i{j2oLQP3b(X&6=ciss z2>coDoj(Go%$g)HYN5JcmYKLwqy;5kzyh1-@HP--Oa31kI1%%?;l@d?_R2Ily{uh-2iV zkKosLpITLy?l8!QG%BdJ>**u`mHmbxd_6MVTue!nk6ESc!I@>CsIH*v1ntl|i=AGB zgB`#@5w&1<(=gHQ>H8^AZIvSwJ+|09On}OQP(-vAb?VDB%s7&{(W1%%k&tgSR5UE`Ui{pf{3Bo4~FKv+js1c0yjRVZd|APLJTo} z$ZxK&6-NBEY{LPY#seZEJ$To~tQ!|Mc`G=r-4(1dKfK}NMW1!Cv9T1Ccav-f(Nh!v zlI@xy*Z|cUVpC)&y$mbOS`!QO#e{&X;zohu48>mHEFu2iu@lCwE6KOtqFw;fK@Lho zWpvq&z~fb%`b}u<755wAjx!9J84A}OFCvRZdpu6JuA>ZPH0?p>Avib~Xn}+MNjxFm zM~T>qXTj}TTn7@cb`Mk72wmj$8B{ub=yH$_(7Cu6=6I(5WsIW?o7JO~WcYE%;0kCQ z!bn?Qe5M9Trg@Ne^HAh?x||I76A%(|<}HgdGwi#jNwF_F2ue^Q@LNryi5q_2E_d`hq~pvt z!<~L@vPx2PS{u3aV4Wc9-rz43>~$|sV=Y_^{!`(416*THz5BP2{2`5aFCM;Q9PS$k zJ|eZ83j*j>9QnMU&3#;}wTY-*HdELoM|Hyi=m}SYdcxfjbxOhU71J7(aD`FfkAVW3 z(ZnD`akmK^?XXPlHldiS4arDKend=0+QgrFHwM+r+h&#McDqQu;DCUW@Z98sgs%c| zt?hSWv!+ivxj?-Yp1k;=07bPNTeTdIQhoO1$u-DR$FjMnA9~2(%}&Gq$u>=Cm$5y# zUj-AZ#%rV`D%wZH(VPtPK@Hj_Z_3$Yr7RtW}vpi5l_hCy#&SiA1pvo;b z%7Pzz?RdiYs4A;}`&#^V&w<=}2uR=PWN4IR{mL=fjyyB- zbgA}^3v+6GzR$8F;HVQsEp#m+y9l{d)_GVkqX=ndEiUS{p?gfS^F?#UAe1@NS~s30&;)+bCwv@U zOGq&g-y*fp_iBYZZ+R><(?Vc&emfs1SNFstqo_|9EfZd z4(MmK+y#kSvjt&$Ht7~1Uu;Yaw(GMQs7vy%js_1Px`1B-kYnnctt7so%Fq3}$jEV&~$ZWn&@_N(IK{VSIIEqG|JZV-BUj)V34(%jp> zdLcy>lV^UZ347VJe!N(Yn7*grfIgK^>0+a7s7!6R6~YsdunI0wfbNL)?~h;hihM_+ zt*?(3@cHb>Ev*7LKK|u5Qm6zy($~ZJ{Q2`g!xd&;Tnt|?dEqCnL!dvTOAbw% zI;~ZICX#o{vSshOvtYEQbZb6(1#rH~w~>H+yRc`AkqI<3G6pua!? 
zYkK@!h|jW(N1ug;5-l1?Y~6XwJZ!DQA%`8%<;Bt=P+XBG9?CkLgq~x3qx2wJM}&GD zfu#p8@oov{F9Ihua~n#>8f80NOh5PKPIA}DGJ{Pkrus-^S)0bRdN-cq|2F~vK z=7l>hgggWic1Q=;hD`KyQfxj7&T&mmBb0<_ZX^Q3YzL55&2ddgs*)%OM82SxIvq_# z(CQ8*oU?5@?qZ9Sw=u>C*d(~a{Y|_%Q32L3 z5^My(tQW1omk;JL!7g^MLlQQ*SE(un6jpKHgmML`$5^`XdZx_#iNI znjjMp=tavm%gMUJrDdaD?1{d*Bs29lacG=LOK0k+N7n?t zdL!n;b{D&iMog;GBUn6F>4{kot86<(O8{M-E zZO!TN9%O~Q<59GxOd9&KKQzaU!mTb|?4kM<>Z$lYM(ixxDC>ZX1n0DF{&c-w7FV>> zd_@ZD{_Jo)d=-St_|dC8wbLM#$z6-|PrU^oc*;CsvC`vAOrma#2KtmTNP6V4pa>Q{ zlNu)PLO^s3=e^L=mgzFkhD35dYz>7NDtHaXiYwH3i&dbA>Y_uF{t}4-ZC!$W!kbaq zWWRe8m=srG@Q`#g0Sq2L_0_h_8phfuOJHC20MzDtM0t_a;Qi$HIEJonRzAm{A5~ETs{T8dXr~f(3XdAb)dYyj;mFP^cq2r1;t#Fjt)e z=8>w!jT=O*tv0PNK<{DSUxZEs# z*yrsV4fi)+ph>il)281PoINUQ8N+|6TpA&t+(n?D`zyDqlteWj^aA*r0F7TFZkBs<6l7#eIumHd za!?7C&;21Hn?r;{@vFIYTDl$L{?%#ZiS-#QM4+F;&SOA8kU9nk>IC`%s4oVrMo>~v z$ok?4_c8TL<|-;GDQZe+QhW^ogqbNAmL#D)hkwir=LEbY`q4m%+XbZ`BA#4XQ=`8a zMQf4GSBvspkG8pAKoJZ`EWk9&n>1H!U-husNF51!b6PUiksx99Ot`3TN)J$tw>TL& zaf{X8t;yy@qjilXXVj>tOAkMbT0tSNx`zUzl3&U=YJsv` ziZO^XP(!xUdj`KFn~}f^yWHV^$}V&2<13xB8CTb5xFc!)`TlsfK`O96{Q768v;@73 zPt0I1U<83q)4Ks0mJ7-ApEX4|SA0EwxQ(a=pt-*m_v5km)ABnKZBBYPY=QK^A#BKGLC*~#yYWsX|JN#z3+j|Cd zhks;3pP^{{Za}I6-;DjlP?I#uPnx`l$VKp+$k95elBiA>E?)GJdIu(iz=(Qu`fn&N zJOVpgKw|4pWfKY)D%=Lk9)d$b)vP}Pi8B{wb2RfK{j z?4Imi9O|4_@u>_;t%h1JrCo*9uK1elc6!0>&&5x7lN2VaAy3uC>q{s#;wJM@2)gSa z*scfBuHV~E3`D?*@oG5IoY!)jmh8zYk`i%vCh$iHwn}xH09fM%9v>9S$9?7^<2W~8 z$A>9^kU>%i%>;UT7Z=VgVDTVgEd(CST&!5KZO0BG%>sZE(iWyxpnBCNnNgv_>>Td{ zQjI=>pdxtAKw=Jis!+*0V;1dBe3Z0B{(a1!C8N9nYqM`*+T^FM$K%dkyhZYGFS1a_OmD1mSFG>8?!K`M>_^iF>hX79xa5b6+C z&KmSYGek_ZGUJ!Be4WcQQFO})#vHtOX^P0t_QD;Tzl>oDaSHWqvVhO*UI+{@g^%;E zCg!SO{pI2`1V`HCDhokZbX*}P`=!{gA*caHlJt_#L)3|Urz(IY#Cm}Kp~OS_#a(0^q)pfg&xX;07{IeSlkgi<05-FaH3e(%1H4c=p<={Km50|T_t4r`l zByJE(@5h2v$SBy1^zxTLCDeVx^fe3YF49@~4=kE`aUyDIC0rh1$XMw_f<3L8tBSZyex9)NJcLvbeq3Xqr znKh8@U(MGGH)5`C-x=*WF;7d~y9q8Kg6s@lrUh+*sPoSx{MRT9L0*BQs+8cep>h7U zAic&i02f~ZWQ*oT2mcjxT2$$2j704@o3@kgkr@~6+7DMYjxJJdq!@a_`xG%Y7y!T$ zXyOOkBT46MbHkaqy0N4PAt@7^q81FIm{;vbdl!-Fj@*b86M?x!&xtgEh!p4WCr?JO z>*hy9{JvTxkXvtFVIiD2^{-qm^S|cuQW+E$Y%7?rE4g`dJ7yMXKufuCYcK*(6VLd{ zBcbX#+>LZe%)tzng&w#(1`9g$SHG)QP~DG0QXgeZWpznd%8y&c0JTWuj;H__Md|M@ z>N>8a6^7$mqA!jfbH{tg2t{g}WJ@2@zzABzZQXJEI*U8dvUgkq+-H$UH+r6vS5glk6xeK*9_te zD$!T&N&lD}H;i1&9x&s`LJ)-DoON>+9?dbsDvfoOAosVRU_duJg|m-Apoc-ITwR%% z{b0}1$UTm1EWFoHBwBSRnf`6E5)lL<9aq8h#{N#1WYx~=U%0ql@e|{^;DfM%psWLi zLyr+M1OX|l>o1Be7d(^C-AfPkPNJs@hpBQ$|NAu+|hve($B{gvRb7!d>6MGubXLg5qed7gk`^4-Ew zNYKcoHucHvP$Nr==e|ly3F#3nlYiP(4>v9mJBrUhR%BuD#2IN6=oVJ421bX=p)w>Q zUmKS0+Pal!5D>$qD8G>CT5yM8Wa*cFb<{r!fBLirgFKMoyo4H&7*IYpSSq2-(`88D zM531?D1gZ?89vp{GiY+YK91bK=FgcHBw+2A2Sn3|DjeVxi_Yih)?KHb5YU)QxeuC^ zBnkk~-Rc-^Cnq{4n!X()0SG`W=#V^!wxq$*9jgPV_zrIgI|2`3z!&(;1%)WwvDOd~ z4t$LOX@tp%EQ94GP#9pOd1zd0Y~tDU_isrYgpr2!skfVY?%Z!i)#O^E?#Y$T{~q}o z-3&;)!NbAChy63Zga6n0j-KRxw8Ahh&h=V;PKV(t)_u0egy_XWP)PeAf_qu9WKNRh zis{18)q5oUM^DiX1AkznGj0tb4$Nq0wP2d) zv0;o4vqnfwr~^A7vWXB{RWH(yEJk*r0k{;4#syDe$mEu?@~K{q{hs?j68;$)8L@

;YdWD3_p=D2U zH^P_gUl&kjT6^1-J%BJ9V!it(V8lS>RS+Zk@i#0g8&kkZEv#%rVVsa&zN?b@efkT; zWQJo-jMHxT1ga<5ME*}93Fz_|&czBnB%s>C{ine~h|`{SMi_c{?|;7mqYAAW2TA?B zQ=;i#?ou)x0Aj3-mI@30mB~2JL%5(@7+`J1l{Ycfck0SHmT; ze}3P4FTmBSkqy>Z2W5sZ4zbKxy^$S<9m=o<0j0p%`&O z8vS8hSQhoZhwxRAf>?-W@ioSC(dOB3ij-l5?*V3{YNT3ZCz{>&Qqg2YcJ26@#R(Ua zy<;K7O&-t8<${Zu5x{x;?d&Y5X|knD*`R_RO*1HVj+SCMMT9c1C58W!Yy73{#*V6L5=tUgt0y&M8Z3Kyp0Cf&A-z?SjqY&R8m&l>PL)~$@jPrrh zhqq^KgUg2UN~M@2AE_^takSrwA53HgaM(4mC{O7Iu5UdAhy^2g9xv!49JPYunO`G@ z3~!;3w~uddw5|~(hFUh}@8b3;euC5AOgojxn%^*-qPZelw6=iM(F?OHI}l|Q9uQ%_ z$pO`U+KmizU6;8t)ut|$@DX;fK0XS^9ruZAI;9J zgCf5){=^$gcd2lk_c@NL1s;7CScJqZ6wROmjgRF(_HhkAwB$h6TO>EMI%`n z8_>4#R#vO^O=O4cTU)5LpV_CR^*^7I)?~8_>AXQUJx>|Tt^uVjj^>7BFxt61Z`0XH z{eCIGjLWAx-|8&~|9YFcw@bp?`LmCTq5fB34u#C&p{a~wUB?i*i)Tl9z)pSv_NAqV4+{c=qT z`}!Z#_)-v^CWI37)gI$BM!lrExs@w%$0ZPNvBAK=c@bu=V@@2N)0pfq(%tcfX+PYU|}| o>0yK6>+8$s;Ns+IZRuvi=jvgXwkJ&sdsYU4j0LP8~djJ3c literal 0 HcmV?d00001 diff --git a/images/Striped-Replicated-Volume.png b/images/Striped-Replicated-Volume.png new file mode 100644 index 0000000000000000000000000000000000000000..adf1f8465eb9cb17607e79f27baaf5086ff0cf3d GIT binary patch literal 62113 zcmY(q1ymLP_XSFWba$7Ow9=i@rGQ9xcXx|4T)ITML%K_mPN@r*?zoay`VHUT`>*xh zl3844#+f^xIiGX(*?WItG}IKZ(8gGque8j}VmRkKvrqxZ2d!crJ9GuaobOx^7j&yW zkeQ&v`oA4pY0~F-EmT$=M5TrIhyUOhGK|pRzce{2jdb0UNQZE1{gk^x`2u=m*BE6r z#9`4_fu<|Kp?}cf(i%0crIu9c41vg!9u~B=i9ZG?9kw}-mk5ksU4W_-=<>|`_+*^- z|4>A|vOJ39Db}sbgeZwflpONGXt?wfwFJ#;TeVsgDLcPFnr30n!xzV@#$LYidd?QP z&s%=%a$M}yx`JL!c2^rmmxfT)xdP2ztz(QMq6Dg$J6CPvZDonCi_wDIXqCeaMc_0o zF1Xh5Pd;07n24HclI@;rMarQx2nukXF@}s1(iPDhGF-R_tMnh$a5PqYabx=Q;@tx` zO(5E3qDXPZ-N}Ly+nUa_5t0?>wOkJ_tq__Q)7jpWF}P z{7m;r?a2_g;|>Lbko^!Lb3?!XF2GN&`r$97%V>4v>l=%nq_;cWj*r_YU-m2Xxb@G< z+ZPgx#TV`TYiLxv3$H&F==ftKHyIr_8yzd?N}L>nH~SD{G*9@e$FZUg{k4S8mo*%A zfBa_i=}#+=rd8~|t;D~Q|JsEVsBaskHHZI7qp@84?SyJWnvVXR)*MPnwJ?{eMAK1w zxp=ab92`rQBBD;E5z%{P(0nZDkz~$*Y?rlK0Mq{9;kVME+*ffUNoTW6tHvu9CIZ{= zivuh^b}UuPtC0)AlEYL-J>eLBnaHc2!W$N*1OoPQ0qv_w!tO%~wq6gmZ!ny^DV_z2 zs7TX0tsduBfj#{6A1YvF^H9NggP^D$iI#=Ln4YgaI7ExO2KU#N1<#^8Z9Ps6O8|cx)*!d1-RbPF#Nq3ZxfQ$q$tBg3DCqn)Nc$E4clHCuU^9iSJhQ z`)ML>jf6chtDVW-;Nz{_-qXauy}HnP1-wbE--ut0LpRHJ8g#3u8)u@KGABOXi@lO# z?Qj`y7()%Lq+kk&_s2_O&fpvSvv*QNgb)S6;Nl>!(CYdM;Jw-eIWAJY78G>uOlK!vRcF&CW@1NC2z=XxYQ zNxZwmKdzcxFTlGMPxSPsj1oU*P^o%2F5K*N0fIv!uVXFtiv6LrWavYtpw2y+BC#Ucj785H@pFl+;!25PKK}p zerJOVl9C-~O*Z3X0r9t0IM$l~8KIYraW?cAIK_5=WJj*_ja^fXqA`h74_D1nwGi7zgyXhNP8)Cz>X-ODY$ z2Dl54J#?A3815xkSVs}4O^2pUXsKb4nBNI$|Owc>j@PRx(%V_AzyexSge)XA;t$iY_Z7etU97k z{CxO#L%x%B8DyHRY)#LU7+*x|PL&SU+=v#edpzj^f9Yv^@6>afJ!P*evMsd>#hk-^L=rial!IC(1LEWcyrL2K-?8*iQt3sA{kD9%2ET*+BAw8+e z&Inq^%|V%WcK71ydXJO(80roL1`FaB%o}ts>GUP5W?u-+!f{d@YyFpCfjz(8kdXD! 
znH!@2zu9o2%`yvKh3x-(!W$Ia)z&~Ah1%+v?{(r(+ZwLwI%a2^8m{j*#Pqh^3--Xz z+#m=8wNl@9w|9R3p__Dz;7*{SVlC6OQKnUD)Z=U8Hj!B10E`$?V3H(HFc|K@FCt z43*Q|#refAZ@^bDx&Pi3o*8gI&imgYhc*QXws&||*fg%lUZS98pjQ@tJssbzS}7D%%Z2RNs*E&AFR8k_I`km&D<*K$jsfcMi4)cld> zzsjC94IG92@Ep6jif7o7bHeP-ze@bbE9!K)UGRu9 zJkopgS&8CsC^;eA<75ck>VpnV;?MTYGVdd3?0>OO#7y1VzG>}^9~+V6g=wqMu$_#+ zFm*Qn_BAzY<&+*751~%Fe9Kdb?!o~w5kb;5Mawx53g~tz6&YW};);)q1AI%Kdj5w@ zl&aV3<4KH-cD+s%qqWR>EZF*_{fZ8ZM&BIBQ&FnuywmyS^nc?!Q-TkJ;Nh^a<9XOr zhARcY|Njo4Uuih4!}4q@Qt3;#E|8Tt87 zc(5nx(T`bIdum@5mh27q8i4fRU29SCzj@A*JKN%~EhsWIAu027{hwzKvt?Q#k zUDofN>%Di?4}NnBx80lnJ6j%qu$~($cycjPi6P0ITf_g3t9sHa1%23^ zAvwR8-2a)okF}c}X3vZy<8D3IYr<9sYt6T>`fpk_(~kW;Att=M;M|zV+r|~T64uD9 z#+(`hvsy0xzt8eT3*UAhcHvb6iaoB;fmfoH<^9O93f8MN;%i8rm1jYPk(K=W`b^jT zzYI6?tHC^k&93)AkO!9kMn#goWV~WZFRE&kvYAsCc>XqRIwc(Zw5N;zy-8+8GA{ip z5XXupe+54L0t&~G9fz4~^pnQnYpwqtC0;(Y;1w{@qP}_BQMrS|#+{uljO-C#KILj1 zmx!+PSSQ^-|45S%Yte*(Z240`+1ccFc6QvD-om~YcibMTq~wvBN^~V6tEqRLj$pxr z{GLJpxe9EHf2*!*$Xs0L>$;JLJujf_wV=HEw?_G%O=z;fO&iW8=i@Nys&2aN@J_f0*k zu7oHiolSw!{*7SETkh88P+v!>Qh`N3^2kwi%I?LB&#Q?-KFqHTg(;o;Ay8$bv zulwt0Ov{gzp&s28-Cb{qq|l`N{Y5-IJvlx#eUZTt1Ib_u4N+98-B7cY)H_npAr)$% z(IF*l-*A_rv3m$lOX$~TSu(%eEogo z@hnR*7bgPPqd$A=qxoYwsP1!ASG#s=yAecd#rH}$b3RVgeSfl~xjBtVIo;*qd>9C@ z6XXC_5$qUxfc2yznCE-e!rIkNJp^( zSDHKUmq)2e))sc6x7Y02oT{^W9X7{Xa@9YyDg*=)Y6+_;(xgt@ho0}kxZRH5z_93B zj}vf({O{RLce{OzM51iz2?RnmuY|EZ%_?)je8wV&(;un*T~WE}j>(g!l8WD%@b*re zy5_C}bEd^lxOCCHSVwJgDn;{zFY#=n{e!=6pf>bNx-;YN{fRf21a2sxl`#XpMwL9_ zwA;&l_fgg|mvNq9S|r6R2%;@%l)&da;bB)3lMo-#{LI=-r&KMoY^xReyd4*uw#5WJ z(I}5x*{PyVMFpmWEMbrn_?>t`ViuqE>@075UZ+=s_Y}2sVLdhaq7DCtP*XiY3sg|l z;n-n$uCj63n(Q0x95}?op;^wh-7Dd}z&HDxtMM5H-n8rY6X+iusg0%=D6jZ>-yEPM zp2m;c*I4_xPiW*Cn*GhJtA_es2J}3PktjSx*z<2^KEl=u7!pNOT6-T)=NA_6V`SmF zLO7QRY{6ci>N7B}zR#dVnfi67N~58mq7q~ax`O3}u}f|7u%p)wDPszqynR*~sT2Kz z%g)yG>?>vy#LVw7$-}TbyuD|Zm*0yzK?lo#N66kxVGvwK0qCoMi{g%))nec4UXfO_UA5YkGgeqs@q;&@FQ*# zm?pg1=TKKdNogESZ4Wf{Ptu<|URog|&ms_$W?{n16l%>VTC^FpyuH}X4Su?%4ET#L zi5nBqEyvs9Xr9fy=^|L=J==0B3pJ(0wRLBBGe%Dw%cS+{6%qn2WOY=JBT%B$)@q_g z1&bxp1;$k}#En@D0tRN|a$;y7_QQXXb71qgUV#$8AlTvwb+`BDalKDqwQIk6^?!$^4P*8 z?%Jc~r?Lf}-Z#I22i$8$siu~Alvt&e zF_Ea{n^H&BVS^qVO1a#|YwQ8fN9gq2CyxEFSP`u2c=t#Kk4Vg6n%~=HkKS zZI@TJJ& zmluo599$t9N;n$bQnUSQL`328c^m$saDqgwl#Es?IpOo6yXc3z%Bu@VcPnZIVW~u> zc(hw)cR~q@eHQqK5ykEX&!Q_y)8d>Sw8**su9{&CcCi3oQW{`qZ!feR$Co)PiX5WI zn5$rA12F-)>gossuS#~BRFosDnhb#-MG?O8VbmHRq&{MnkOT0XwY z6$fWj7Ax~sxKSce1rHiT=<}6tT*f)n-Q>eG!ov3jF6)T$7y?(`X1n&kf1Njkr$7nc zhQl*sgGrh#Pyqs>nAs0;O_Gl~5jXbdm!D>|w0bm1boCr_6u*e284=W8PiXN7t>do} z$^;eQ;2yfOoC>1L1qVwccqAoUjb;pC&{7>bdQ-;Oc3iUMkmnp6pE6KGX^yq40Vrju zod(a;mtg4sQhZD65`2oe48Dz`{YLNpDA!-4McZ|)M&Dq_>=#PFnhP?MvKLeDSvfpa zK-*;EW7Uz3x1x9X+mG5g_*2r^nM=Gtj)NG6&?iaT2kG|b#^kfdQ{%NcIX=Gp>|oZA zDFAIOEsatfG922zyv2>`<3?&ZolK*oq@*RpCWx1HrKA{6Ry^Dg#2;&fi&S|<8rozE zlk7{(CV%)P z6FVKdkP{&TzHghR-~03z!5u!;6#J8}S&YNszpDLpnl9@*2nlPcc>6C9B_WmcU8)a! 
zsY-PZi8-MTx~WerHh*?=7@R|V^>f>mV{Cw5l}m5DbxWAs(;w|}x1@=R zLX(!ZnS;+7o!+JF7)|C|G24K?((LAD~2` zM-C;wY9GL${Xo9~QtA+9(wz0=!w;DvY*NLl?-co3)W_eKj9#%??W@H)&w`pZz|9i} zYC`zg)x~#3uF86KeO*!vd5?30IY^5iqoJ#-z(P3MPTtwllHTnPL)bY91s>(F*nrcD ziDT-Y2$>372{5qpi+^U;B{tH8uJ0`M)BW>^-g%8~QIrslYRh!v<49W1GEF!$PAoguZ#1!#@O3~HZ8&qu_Z9j8IdvFEo|f zg=h#S`6|Q`{ck+{WJa9SOJy7#v1-HBkm!)s#MZv`dAaSE#L3e8Mt98lGyMo3*?}f2 zMo7|1au64_w7hG;7YjQTWbxMG2eMwlx&lXoG!;Jk@5i7iwwHEF9%GQ(o%wO>;^O9t z=U0<*_ zujNoXX4u>T+!I^X`%KXSJ^M3gll@A@%~VxgJ+T${HGVvhs*-H`jn&n;W^4#e(LPzX zn0YTl{Q07Xj*pHG4o^qi21YPhWD0SgBtu+mY&KPRGcl16{yhdA9oZcoLthh;UtMTe z-B{c`^fbY|JUmQp1Iz>c-jYug7q9+%W6+CZ1X=E+I3)8Ad_+W|$mXg3WYH8n6kU~C zAQdxIHS&{3i^)oqnWS4WC9skA8kZMOkr_b&UuJE4;NR4&)v_Qo{Va2!N=*afKPorUTcTB z4Ywp~F++;*W8LvF3h-kcDTlF|KnTHsh@Mz{<<9wPgY)l>vOm=$G>;7USW2##DD=mK zq&}=O3oTIo?avy9PSrg>418Hoy@M6DWq7ktmbG%JfX9DDK;#ePgtAS)gY5NfvYETP z#n4PGt~p9kB7k1oVZDb6ZrJPKWHsCooIl*ElXCdxjS;6}N%4NSBN$ul2)8xNg!}i# z25n$qH-w)Ax>8Jt_=w2Q|89B_oWYlggipO5M4m%#>ug_CTDtJV8^A)@nnmRq-m6r1 zYNMA2?tZ?V!6avn9*#abGnPB|1CZLF#W}EX!JLQ)^sDpkjW7ZPw^jb`GFz5g(-3#Fv?RTfZj1mE$$mL#)6V<4*$q)i@k+Ae9#0zACR5%; ziQRKz&Ng>N%~1v;%F;K~Tq{-uDb9;Dx1K^#Nn}@rQNs9colrei$>g zyn{lc)g$$S^UB!Cjn#_PyHWt%ZUUwE`ue(7L^YJ|NQLWcoO;-T%aWfQU|R)_#VV@W zQb|jt1Sv;W^4l{xPoabn9$lvFvZxt7B81QqJn;Ad^6kC zQbciOt?Bu zNp{hiUZk55eQHR_qa$dP+7hm3CiuvP-Qh%4MNNJhyr=g+Ed6oV7f+b+n=g;vW(W2| zVcOaRw752=`;&%3{;z?_ljkqots`@bICa7=(;hZ54*A`5%G!&Ajg9YsHoLx9P;!17 z(A3(ht!o?&!VW4}pCgSb%yR>%1R$cD)4rU?3H*3f|iYfiyDS}6Kvh6?&sSG=cOxk{=tDC3ukpfB`p(Tsjv0!sn z0(a`bW~}+Qty&V6%>5Nq^Xj%gjlr4XpW_jRrGBFHQOhpTfv8Yk@4VR|`-;bhI2`Bb z>w&J<;Ivr2VBqbXNyP3wJ6E+P&d;FEPhMDB`pz-O@-}l>IQ6L~$m?V&nV9r0Kpf=` zRa7AF^Gic!=E4oS`PB1a6#`|=$G%O;e7)w90*=HT&*TaJCGUNt5NIe?_;>ojiY{XD zSXe((z?mAA(gjXk^jvmex4aPv0e(_&8xt-+;Nra(6k<5D{r2tK^HPECS0T|fL75N; zByhDzUOP|6Q?k9ab8$kB%w}B70z|s?Gc3C3MoB)n>SZ|@CLwbCCd<7nSkl6xq;K}mbp`$x2M#RP$8%_t5m;P3?_}HCUVNJCVp#SHJ&L2%jr2WRht z8(h%cTtTDnr4_Up$Vboj!Ou7$VCUXEBKD7jVvsm8zOL;Wy#{GiQ1&b352zqR!){un zkE-UAbkLnq2j^QA-n9g+7@@eB5N;uFo*DNF7k6oTrUddMN5#_9M0KSM{+@NjWUv zehEAwMK&QZly~mWLTENR&uS!@rOXgPi;FhBvVg9H_rChBwyJ8FJBpaUv^KJOZ}Uu< zvpoVQU;`1djzPk?^MxVdTU}kE#N(klTyD_a;b)H-xzB;uf2s{TTZ$jWFUR@1&f}eT zb~B8rj}y__r1t2dDW%QL8MuJK{3O{PaO;*Jawb>|`s}^tX>O!`6d)%lwB$ApN;Q?p+>Aw>Gx6bd=b9f`Y$aNU5+- zbVzUByg6Cx6mYJm0(vHt@cbGp&Qi5JB|%C~@_J=f-;0sgLXTH-LU#v6?U$1x-HQpr zM=WqgzPoAF`M)Z(%XqoDUl61g0IIUHvky*Au3Gfd<$&A!-<*temX86w{nApKy4VQ0 zDvuiyh?O8*>HFuvLk~|F;|U2x*Pp=wkz6-j%k8Jyl)-7O4h$Ro=Wq{&p45s!3;J&= zNI6=s?8wXTa(Qz)DyWbQtnhN*Va(AmiuGx4L})eyeYvJH3;gXvvs6eoB0lcU@#cV| zOV!drTj}D&UW8ps;1N;tWA*Ate(j9@-=Y~t9?Vodj`XeRVssm8sd9s$~!X#Z5u z%@0bCX(_mehkXgu4R+&ObR6-2v=Yy^Q$h#d!rQK9mD?XK#)STDgtwozfdAc21y7!~ zt^;hmdsiO})g&H0u*LtG0L09z z=(XDo1!`YLxJ0p=Ox@u9e1A?7;{YB2LJ1}ex=)D>12N<1bvi2H{TRnLxe;spKo9r2 zAb|JWC%4^w-wo{83jenuF)(h?$UnX)LHO%&U2i^cy$IkU$;w>aFZzm~zkmD9ioLC! 
z-4!nehx|fR{(k*%iPy<;;sO+KJbd_lw`--*&h5vk?POjk)NbnRm=P-PRi3T;CP#{12}yuw-@hb!q-Hj#G9?8n=$0!OgS=>lr9lr= zS}HGW+TPSih^2RQ&C91{KFats9*g2{C_ubf-|W^yy`uVKSHm8{d~bs0Z?d-K`THzHxiQ= z0x6JRKp-Pv_2sP?c6#9@4p-FF>?#}I;n#Itpm%yhf9+qMKSF~GfNs$1Y;`1-tLOI% z8TMNI{MFt4{VjO?38n9YuHjE0@Zb*4&i0CE9&I#pHp&Wuvh~LEp8fu9MqdsQsB?Rt zeFo|vd>?_jD12Z0e*ReSGb|VpU`v#N)cg`(0;?{FB!(V%GY7vv2PXma_Az|D3V$2b`X z(Myo?vqxff>kuX1Q-Bkapk{WN;wtPLkEcgP`ggKOS$HZ4Xbg18%fZgj8P zE5~{GA_KXTBc^%|`h+7GvvGw?TKcNNNoSpTK0hN#gnDqKka@}WR_ z8fGaGd`Ugm3kK~#`spq_1;B<+><74>yFYao{Ww8M!0Q!ulu%Vw4S;p7?@~d1q8BN2 zzhoHs#b9uCB?0w%sOoY~U4kk^TTLhu2ptFTo(#<$uJQBTV*B|o3V1<)NQci9@#a*{ z;6&b7FmT>|xSZNQs;X1V8O)AVWE|j|5=F$sa)147+#4hYZtcYA5x*+Lf&Y6@l<0QW zzV9}|ltEikTvHR@3ZyUwuctR-Tj>T)n}JY&nwLP@D9yiv^*&D^lYH)byZCwA(EwoW z;a-X#pk&G`jVOp;;Pn6~!6!C9WnZBP9ycn|7R9ZXdA!M_F$v^G6}ALxJ0orXG#>oJu+0jVy^HgH9fl5* zY{DQqmjCHTE8I67Mot>*o4{sVX9b}i{|s*QSJiboB0s;IwyBJ?;4*1AR%1=CmlLkO zILY|lP~Ca?z(p1ns`C{f+NNiD^EArch%4?#fq~QQ=;&zM`pW(i>rmi1g-+@R{{iz* z(@lN#$CUV(7;IJhf~O$p&K9KQuv8zKC*nQ3u<#2Ili~E5Z1CVVT{6`Jwg^H*_;H+Q zZAnoa9UUP()ItADL0Y2cLj-5Pb(6ncoDb9NFPSG^BaDo1o~5S~lT#QLMFl?|8v_Y> zp;~|Ynx50B%SU$V8GsbnZBkw-FUMqyH1qw>gq#f;RYv-FDY4xTOEM6Ax(9z)O$;=0 z`w)ErfIbPN#SgRh-X}|%3+Gyhf5iN6rU5X_cI=o7v`Ay=ZxEvpIxRNRJD?Uq zfL=TXV$~kKs~!=mygfMcs@qTgowU1Fs+t4WC6r&GlJgGBh2*PcK8?%%$x(l9hWl{OGaek*`n^kf$A2`+*ucgmOJpR%uQ4b5m+mRM+}6T`#|?_J*T;u z_xm)l{b_*-+25RRs$u%w2krxo)hZ#;0mPWglwMUIWeBKL$*h1x4 zbeNc9RBW9a-33(Vymrhan=VMm{}7)53$5yIf8PxC0Wb%d`|~$wVH9d^?7eR(K=8g2T!58}JGm{;L0K%Z6=1tI~zn`C8gBfx(r5oCQt~++9WXM{l_Yc2oJ2~^usxR@^E7wwb{4(~jh zdm15CAlVP5c7zlEe&V7eyCBhh15}C{mp9Bc4fLj&F*E z(2CDex(?w5oZ1_2|JzP1>+a42TnG^f3D_nqE=pLCbWTGhAhqgaxVX6jBKI@^;ef)X z3@{cUB-2c~+0fqSCra;CSBO90NO-}8@B*1iC=L60-qhMTnL;XET!I;!L1f;SxY?-`d-=037cud(4rw=>GI@n;UR3 z0=WSIqO(zjJYq$jH?Ia%}>5Q>YBwcT+78RdH1Tl&~vcPf96ekSzx z?C|}eVvIE+E>c05P?V%bzh11Ph;*6Q0|$~l3z)@X9_|+%-n=m(R(5&1W0@#$$eZbl zOE+1LnJ9^4kQZbRv1;UP`U-Awyu0ftuc=|cb(y=-v2%*TEm3_{`3eaQ$qr3nzyaKn zmc8;Rn>&0bdsB}ocJ9t5xT^FP5 z9$sD-e+O|c_XK*gwAIDZUN*q`v32er_RAS??|-ygp$363=`?8(8c^jya*4MNCDS$0Z+76n(t}NXWFdd zswz~t!OHW8)-{M`r0f<;3@GJpZ#nacT&Eg!xi{vVcdB7YWgPr|BqiTT|brf9@d z&Zb`e?)nR{f9!b5DxBz(6<3lh_n264%jt+?c?1O&$F7J=o0~RZ_lJjPtlZWP7U3Nh z2zGP)uOd;FoBai@Jv{-GNjx6R5@GUbjQ{3r+2Gd<+tg?p`|WjI@C2Zk_Rm*nA3~w~ z*NevcdpXXfRrUv)*Fc@$yBOmtD=G>H#>1~W21ww{#IX&QW9ij_y-#@1&PrY0*07F zPf}eSXG;8@qjl9S*4r5%dm#5I?o+a_t816!G3|c@mLC~BlN8|pzvNM$LmJ*7u zzoqKCTp|C?Z~mS4%*JX$0pJjsLi5{8O6}HN97>GfsFQph*g8gSVA$DHvU*a${)91m z<>49D53^l0Kfk%xUtjNQl=9B5lS~ZBqxEA6Jbba8`-FeX^60SKn00qY+u~^b2v83V zZhw?G=8DBh0WSuoDQ@C0;NN1#k74wPKUrx8`y0l5M7{7EC zWMmN2EtzKXuy_RpzpO*cYJ0E`S-4m`03QUjxw-l3%42Tn0HpvJg<=0ENoVzSy@Dei zrmUrhyW_gWTl}}#=$fOCU6hq8et6$H%TPfBO?Io|NNHC`UeT*m6-f!j6`l6$rF4y8$_{21|oOP%_K1-Z^zSk0hL7#d$MYNJl8g)wf|F}J9UM@FwN2l3)5yTv& zMPw(N?u}>ZSGmW=r;tY3`;*50s&~u*>>I@%fiUO!(Yk8f3D|HwU_tEH+Q1a@3`0#N+(w#K6^hA z1Rd3+JmVbtA1D7mhQ6|vPITg*1ppoXJv{M?3`uWVR5qKY064}0%uI>ypCF_q>;UOn zK8^99R2gj6kwv2%b`2kM$+Y1>i3!nCigpCI68uG7rB)amvc8>-YKp{-`2ay4%}myp zQAY-B2HVM%Hy0=e$H&7BjuBjNEFcXQx+KLZriRi&xp;y`;8f(oBzzDzlf%_KscogqsWnEpEhPq_~a)jJX#??V> zMa3Jzof^W-s6#B@oYF|m0lQ7JVRfVEqvK=!WC-xtw)#DHoE>&vx@xomu7;992zNyi`ktl&&=Hj?2D*WU%A|I_Ljk5k36Y&k6s!>M*wRg1x%3OQ+ z=&BKfb(u>6^jDJR`udc1Zmb|K1<_OWtZT%@TFE!MbHmC5c&!8~mAv7TV%#3^wI`2;=o^xDw0H3 z7L#wId0(d;4{`ka(8_@UG8;>uXlicOZO&)~45e+EzTI7cLe}rnPj?Nm&}fO{+e5!| zhYTfonY9_fJ$1-y;1faJqr2FJr;A2V+xfL(zO=ingo?N@6x;K_OMej0y_yk-T| zR-Nz+0fa3t!Xz{q#!?ODH>7LW_1SkrN#505=8@jtt3L=%HO#yD+_Uf;oE%X+dPpga ztQ!9mb$3YvIdPT9^C1kGyP}}V{0krsm82D^=O~PJ^GOsh5!$twot_S{C9gJYO$V$O 
z+Q!BbDAuaLy`z$R7y8<$*JG}+M=(n0%V4d@Bkf;i0mbtO!{68XsF2qP!aXm;8WBnV z)F{yE&Eqh;Rk3EOnYC3Cm;g7@}yKE|CmHrRBY(4HGW=HA)Y8_LF|cQEIVHqgh?`s~afk+ND-YT-a1%e}28hFk1{kVh5SqS3h5cB((rRBgigNR2u98rBz|z zw2(ha`VF!YTg2hu#UEOk&iAF5eu@u z&3s@2Wi;C_mLh|)jD_d;ZPDJDfZug@cUOPv&R+guH3z)abwJ2RZ?K-qkL$+v?s2|r zpdcsDxb+6>;H>C-8$`$bdz1}v@{=?I5nnh$-`952( zUZgN6R2qi|XlMZS-Npae&#waz4*~l3k1~-3TDZwT)J%)xz$SNicr8h*{TvHWd*IZuJqJebI$$xd zzPmb9WQ~zEG=|#n518150OSEj!N^Lp{i}b7{{n&l5<1Ehw;G2&f|PuLIC3mw2f(w| zE-tvh8WOTZoj}P74cRT8RXIs|h6bU3$6?EEj5~_+&StTVgf;=4_;UYK;L`b zet*`f-{L@axj$95BpMqq=N5qr2nm1}X#;0^dir3QIK zLUbU|nHeq}`Te(h;HE6}v92mQ>nS zG&D@hBEQ5y6*qb0swIH!@1>11hn=l4je~2(MWH)kLt6s_*+7Ur0ZjeJ;y7U4Ln;p4 zI4`EHtY%` zcnXEvI9otC7mhj{)G`4tx3ffY&xO6$Edbwl9(8vin~+C=Wmqvm9^rBVA;<+>8+)3K z3^9!#pGAkO8!ZoXUKP+C5*OnWgS7fAPC3-Kx8W1ku-y@adpY4hUO(hXqsMfS`9_nY5%(wSCglQeTzyVxvMX*<^3 zSjlL_@rANSF>sS4il>DtcLDy-No4gUs0m~prpO8Hz#_8|hdegA0~IXt)L!K@|NA4V zB4`R=0K)*}cV8Z%Hr ze5rEiTY}hA63?P9Ta+olp1tspfQ!r9pL33Ml5zCv7EGd`Uj2 zI8hEPAur8zHm8%43#w#^JZ;09tkviUe%Im%Mrph3K0ZDM1cdK}g`q$bwgsrCIHI?` zFRNF+3~$R8%`|-m)z$i>O^W43$ZX2ax{K`sJL70_eTo^moWTjSxq?xq#Pf+5T+V!W z8uY7h+N`@MX-cMd59L0i5AW+fOTH3IEfSmJ@%Q)oc8HwB+eyN!3oX|9O4XeLaoIio zj{7n_j5L&ZxViiK`rzQ$b!#b)>46zAQO;Y@)fIWq+7$vgdf`46{=I!bqf1hYco2t_ zI8q$?ChR(8089M_{8)*C{cIfo+W)^rQK=b~*eq#UXy)b7TJOQ}x2uX@B^_n%!g5`? zr+9)*vhMN;)+z2!J zUCa+SPG?$jOuYpETiJDkC7|2H;d*2?JI&k+I4w!!q$Or!IU?@W8>43%%wrncxfbUa zUi-1goF#du0oZh1`w*qH6O~ZJkErtwxf37W!ddp4jwf=wP?eqFLTqV^5n!nTN-_t_ zx}PTDJV}EQ|0PnIP&LWV-v(UFTO+bKC9=QFzkn!H-+EquqxAkQ!epuO366zb1mHdE zJtlpVm3VL?9mT%p12d8X05>2M zLR{eQ{{176<~`MP{7>_?m@s_!280r%&=ujECa9AQ- z=1`FBRzrA+j~K{*xTAO$@l0tdNqM8a^H%n{!SK3a^@3se4TNL}xx4Y)iZ}f4QfY3` zmF2sm79*7tPZMq=C9~Ur@*TmpkZf5TOON_Qz5j=)^Ny$b{o_8#C@aazN^o=p(m%z|i{0al3U#do0h1A5A5BJBn>ze&AnEn{jhKHA zS}kH>VtN%%M)uj6KG)v(mSF<$78pX2aZuonEV|MeGLUaIA9{CNIF?$iaCMp7W~`1Y zoE0z}Hu|Eq)$!)Qw@dg9?5#EH1pFa!F}ptS=7FGbyPsI7B`tZvNzwJfIX{LgpC;J7 zvf@?7s$R;~I$h)e#XU7a^&A)JpC8Y&&>ZN)=W>@Hxkk*BeTwqukn$GvI=Xv$lca~l z*gKzzcf#+P!`74kF8OzAj}0u@=tHH|;maClct_C@bKc*G`-xl_#8W2YWsWd zecxEIt{i!*iei(BnlVd%k!{Y4YPMeU{}5eEKZ;rY)1rtRaj6(`N^kxL zL$kgE`GS4*%T5~pQZd3BZr?Vge~Ds+IpZ_9C_YE8+3gL3c8;*0W@`Vtd!W^zH$j>U zpIM{(sdPSfef|9J5V;+9*czHKBGh*$+@F<7lVVxvh~~DGr{E;&yT_9o&TyHl90ht` zD*OijpN2cv07jbYhJK}S?a?KU+&}npLtn2B=6@F`6k%kT(D~cX?u73=eRoOB#8R`? z(BSvaTO0qIZj!0Jdevr#0_M4BH^Ud2KaHxZS;npyg|=K#c_k1_Kbm;!Q_t$Fch6>g zGgBq2h-n;nHWNb|zP>C^*;oHDnjw0Alw37KnE7#e2j%R1u%yb-m$^#Q<~hjFJK%FW zx6u7sc!z9{ZdQS0tEj1b$13)rzpo?lstu}%6n9CK6~M(^VRb<2W;ZJw4d77y8^P?) 
z@8s$|x{>SHdOP^h(<=^dOLDX4l&|f+m!`t3_VmW}^_c*5s1)9Y<00{Su50s&YEf%W zL?$HF#!6HUKK1hza9w%ieJG{Rb5$%$bk0Km7W3R6Suq2oHvfV3%k?uy{VSm)2B((8|8Da&yZ**TK8wVk3*PbA z7#xqdKnNXql3XN*(`y&!owefshOJLdmKg$EUE*cSFVE|0M$>ZsAm%7kHS}a-VT~dB zTARE4GNOz4W9wXby0?s4R01buNHbclQ_>P+8cYveB!o#O-JOU^Cb1L~Xn|>rdV9@HvgXJm%(q z!yU%2B~&=Mm2q*Z3oc%Hn24i@wNtxkNB`DVH08I8v-6Mqe25WuuijVk`S~h38Xx6{ zJ04}aG{^XD8ZZhD4f1Y;rHnu{+^q+9T1DN{y4d+g!;zCQjCYo*7z#_(ygaU8ap9){ z)A#SoczPb^>vT+w2d`lnIazO1IkHsbv){eiaXD;`pu+QMem*%m(=5w1CDK{6-qhiD z#{Q%*?HHx(oQu|267Jmj)wiW4nO5K1h+Hatr|^u@aT?`BH3sxK6uA2Z*(>9ZOu^r|M3Sott9P_Z^( z3~J_v4u!>k2OXoFc>8AT;o->G{6Nbh2|x zy=Eo2jRBBK`WhgZjb6#YJE!tTX=`6(zYfwcz^caWk>V;Hk-|0a7IXquC{m?eEKf^J zt?{wrG?wR#yptG@kcH$$MxLYBtJddA;y=d|7leZN*76l{j8v$YYgB3$4yC&Xlq^$l zL??h?zUf~DYOt=AO=T|*d1uC$uGnzOJA!;QJ5n#M5*G@B1w$V^;TcBR|E(5ReHBZvv z;NuJyox7(0d+)ujzVgVo(1*wI1~S`ni6oEz^wrmw+37h>S2_^#CO zmUGuLeNeF~cY;q$Z%Qk|Yg{>|%&U1=+iTC>PYxvyM%rcCIM^YLpqHca9KdRl-P0Ma zrU&%#wB((4o#a2gUuF#r*S{*5k$xKaQEiPpF*mkYTKjXK(t%DuQ!7k2R6iZ&=Q-0h z*DASxUwXySS@n3-kimqO_Bb_7%+XQptUW1WVtOMnx2dTJJ0PL>_ngW#~i0`OXxhGSNdr}!e5m#^;{`gl3-N{JIL5WCUxOd|UXZwm6N7vu~ zDlgG1pezA{KE3s6MW)@V{IN*5qK8BUXDJIP4cDQ7uLZ2!sK24$(o!B7^)Z=8KU5d1 zE>OPz0pE4%5qbS^EMLL~p>Tng7Eu#gBSlkL&kVUgkU0?Vixw>k2SVql4KG=#4KHK7 zj1G<9ieBrgbZ&aE5@gRfHsu}X&X_N-=U^Te+SuiqypSIHnSTA={Cs0>LmD~sysCD- zs1!LqsDl`tANUSE%el&ZH|i9BWIfrzf(*pBPnoTN7pE(UwwTn{%WP08KZxH8acWq+ zunswh8?UNOdLEtP;?F*y#(95xXhcf1<W!CPRFAW%)4~!(4b~dg=v+r9st{|0>Xm+Xh z8&6nB-ye6dpgVTpvV^H-z~w?a1v34khg_yx#>uHW>u<+V%nQ3`!`H6Vn?`dmQTGff z>_m~wd195ryD9}-YIau~oNeG*085+<+%h52Qa*exZP7OZw*>;~w@!RjuG!dnQJSJo z)i;t~8R>Y1sPD42hd=%nOJ!@y*(zE_N%1Q~f>MBkKT-U3`3AMQ#VmN7TbcgvTP@#6 zrh2|+i>g?6G|%2SL#GgWbjAPJ(xy2#j`+j8j@~W9VAFwth?*&-6ZOfU!@^)`XOFwT z3NEqHWyfbZ|Gm(fCdI?cfOVgks@$G5nekYOqA7Xxq_I}oB@g!q>=R{DYN`+EMdQPwuiQ5JD}-|5 z&0dMXYd~-5z06}tW)=0?652NHmE4@x8Yiu>A9V%c_b9pht2pKNI#$}wp5q47N}J>y z;lG>lQ6_`3l*T__l)+4DM=O0LGVU5icb~6w1WMWw7Aenmmm}2PK~qN_*7nV9coh^} zWCAta;Kib`n6H4w{59PseqYA1`RKkGZdHKvtfEoPKF4_WSMsVC@(NC}4*CaYyyS6jMwFh~gTQc@*8i+=F#=oEGTBqjvU3HbC zCWc#D@0PBD-*C~kpLMdX{{;(J688oig^}R1-rJZja4j%tN(asQ;f50*Mb_}8Y5)VX zT19{+@#fBs&3lV)nVDRGbPRvoidZGw0{uTdna4m?3jj;N1QUDYl5!qk$y}15OH66H zp>gL)pfzvcy?fSy4kX6|AuMhS`N~mMPDjcsG;}h^^r|Vx)YN2Lr?fc!1KE|%VVj=I z7n6fpNXK=d(V{fA+@9CQn1zLWii*Ll7txiEvV$BJ7FqWz9Xws<@)x>4FkoM*ZCdu> zNq+hx70092N?zq7G2%~2EbLL#1_lEWT2BOum-(X1z-w4sHxc+NGaY6b-+e_6xs^3szC4LMb9fBB|7 zezVg>O5=k#XxZ_~6c+JD;e1@b1v;ppl#F0o&I74QFm#4kw1~!id18$S z8RmRta2$}BcFn+FeNK(0yPhZ{cp~Fi+7MkVy`Z ng_6pb0{Hlh)@ zmW4(^1mXpDVwq^%dXIMc@yA_kMs?q5mCtz2H^=N>*T*AR%q;NUO>Mo8V>U2jI(;Ug zdwJSL&F6lTV3d-Kdw47Yfg_+gNIpAGjhDM`kL6&+1#=xpgXxdkekLMuF z42cI4Mww-_O#XbUo%i8AxAR8B@%41LJTq~|B2XfPquQdAE@cv#)p-;^XzIhb^BmhK>ZP5QgNFj6 z@#0)%=|1TTkPVgTSSt5urPSy)db_`Ah{wp^%lwzAb(GldUism^rcr8lDic`j~o`v$|dCP%7+ z6a@m#(Pnil8E%(JOfvnxE3p-?88<+af6-0-d$OeHkeiP~j0SW)!B z%4n{Uwzii1(#6`M(xz0xXXe{v*XUF=e>$t5GTJD)IAeeE#zJzbp*(UR|%=z8LL0u`pmWw#KllbV? 
zOkp`@9(5FoXvr{nIX3>-`le=E$>WBX`lAgteL2mpQBw7$%&N7T^TKh@bCbLa#<|^E zl(?Nx*)cbH8O|hZ%eD~ym1YazK8ax~`egP_w~Rd|UVt=b1~Ue|I$us^nHntZV*Agv zYk$3cuG4KJ@Z8rn)16MhWv;u%#MyEpsp+68wX0K98_rG3&NpIO9j2?S;*q4|-n^P3}(LYsWTqp=%MTN13Zpmv4Z6 zyeLh4yySqJ;u^1_A|I!G`d+z9ziYOCTOG0jqlmI>vyO4a`6A0V#tnS2oZi>w?JDMU z&o|VJBWS=DhIs$_pVg;T1+jsREVxY&sE+Np9X}uMW54E`Ss&jk=hYUjX5hU?8f+%$ zQU~!s>P)_Nr=8bT42gW~bNKZxRaNx}Kr(p)l!*Th6KN#*Fc0rz4C5Y5j|qx@;CL6j zwBx8fNne(Z0N8HgJh%UU9$v9V|GOYktikYeY`tFl{30FgZ zUwQy70l&4e`4!^|J_ym8{&HRAFs|sQx4y>H0oYTwv_bLF z5B>)-*L2vu9G#tQJ0my8DL+rkj(SG%>7Ff8a(Hg$3dy1r=s;}u65-9AmKpGUE$MaZ zCuvxCSo1#ZBY*7Z@MLFI2=$nsD~MIvM4FN_2huiW&fb%FH63WEcg-K2p-G4{$i=5i ze)?$6y)uodLh8kl1^7TF#Ux28=ocCzJV#vjj33xQF8VrjWsy2gN;W@@#L18 zsh=RVj$C+H)9DKe?Do%})d}_yEWDg{Z-r;Yxx^v+Gbq> zc#%yMQ$9)rc!z@0Tkx}rIY0PEYh_L=}gF`aQVpc-Q4 zvGYmQfyjC9O7w06ta<9Sdla@oLMHj}1blAj_6bs1QwdV=C3NoT;yqcl5_9G{j z>-n9Km1_sQ5v3%hxPMVCaNC+=KQ`w5-U)K7+EQim{c3#}m(@Buq6H2) zH9(w?LEZ4n9gRM4Cn0~7{|u^X>=*)MW4oI_o&P=0BpjB`93QU~o%NYdpB(g6ak)`( z`=7bq1dU$jnPpnEy(-JRFghEa>zwprYc0c2g!9VxH-@?T-&aEDVc?1Jn0iI6W+J|%E9_W0k{45k#qu(`9= z0e4YL=68>l4b21*{6UxU`Y^VMMV`;Pb)mHJJK~;6a?M)?#Wd-$W@}0fL1!h9GAa=Q zY&m^%Om?-?eAa54$sfqk5wLmEt{9snhIH9D0uYT#*3?R1qg-xPd83jid)_?yUq%n- z`@l#a^DC*Ah!>Y@v;Ml4(R|7&-{fy#teB9x#CsuVRtasH2iZGgFc&iuM%5kvE?43t zmla;KZ@hA7j=#fVic!jW&StMR=fSI?eu|^qK%w$0E8~C~#;5Q}pb|GMoLc^yXPm=Z zs?n(_l;R!`{eu;X>f+h$TmTJ0hfu|$o=9JE0NI8?Fe>Y?(^(^Er{!3UhoE8O@lCP@ zZ}ee`L>FjR^f5A(4HnvZG;o-JxFyQLCCM9cR?RTKFdZX%`7egv^ubEFTKQ(>!i_VH zeGjtKB?JGx^M2TQgs(sU{kH6e2KF{l*L0uBIqO=M1)$6YsFb~aAKzrq>U)FA zgaN48H{#+u_))r4yuPHAl%FC;qWx8E1)n*lD9{M)SLfH$*F~zRSs^Ikh*RTfuug_+72`Suc1$e0ZCqRZ^?8wynW< zO#}4?7{(3S%Ss{MTZ#EeIa%E(_cO>A1T#gA3sl(aex-i6`XHG^BI7e zELkZMZLHvI?l}i@eZg0LT+J@Gpc)T45BJ)bLm72ovz81e2)d2P1? zu)Rr-AF_T*eL$-(hrt{H{>+M%QL-mrwUQp}6eBs-CtIX?(b{u#Nc)h`^#*b z(?%vLXkO=DLX@C1vYq#Uyq&{5QYxpQeT- ziM!U=4op)O)A*A^TiLaqpaJy4U?Da)gPW&+`BV$AAg?=(V$R%u z-n%;|utxO1zUjd-jI7(uC_UfcZlsfG9^&?op_B3h@?qa2>-dwpgH6z}O)8O&JW!hQ z;Wy&O9L(p=w*{vag0?tA_~S3C*%uuyj89AeH1>gt$~OxY(LmNgc?boNH~{k-QP0`u zsrizOG!zBJg}Jr0Nr3s}fad{^kzy=w%49ndz4UzK!UskI5^|iU-*~s}mc@+!LZ5WP za(0Y@z6bXsDmq*XrOY-NcZ;DxU|U$!&Q8GD0%1K(V}D%);#(9Uu{p9+A;VZ_nySR@ za(<9>vOJh^)@<7FN9x)#9*fYNA1|;rt@_PtPO0}$1l8}vYm(!zZjKpeJy8dOAHe zqdtWYrl2^zL>qe5cgJnWkV&x1<1)==lvF8SdOkDM>(y*qe^7n~MN6UJ;PaJO-!9HB zh$5B)qR5Jxlo(EZ&dA{K8I(=m&p<_bj_{MrEbO<} z+5rHz4pSi%h`4l{w?zm!{DwG?ahAJs#xzBoi%V1@s&VdP( zeDxPbTZ`8EVCXU#*A)%K^d|{Bw@DD1hn?@!3;#KMxeoQQ`isx>+o^6uN;m&>+SDhp z`?Id@Cn?3Q;OoGatQu?zkpVK4&z*a0{;!WY)RL(bzEm9Vm( z*wj<8!0rU$33qyZFa&X%^lsJ+)YKdl@0UG&t8_N30@5)MUAh(NR`Y}QbVo|}&cUHF zA*Dh2%(AIrA?WOMyRr_s!cA!bepU3LeW~1KUti>$S!Po0ow2@#WNfV#x7CX~sI2tY zh=trugD{56TOn>Yej2L7;>=6MdrRXV$$62&;-sofN-`Oy;B08s7l60?fJoB`pk10$d}R8MfDr7DE+my~;Ycc|?9C;wTZ&?b!~&wSGLDVVv_zrPc%ZQk!T5NZ0qpVmt42x#<3e1%g5I zJ*;tMQ-NWmZ4>>X zY6@L^V6hQi^3BGteBIzS%J2a8hzf{VSZQdlUJ5HHEN0&_h>a`ORn}eraMh$u%8NDhO^yTui7 zitS%Wl1!5e7?dk;<chyJdlTwr2m4;BR~-ol&xX43d6%Bi_xvO zvOW{5*E_lptD)5U)`8dW^MQ=sUTf7$FnICIoREqiQi%JnwCS^%YaLiGAmsNB0OX`{ z=Dr;bEqq9wIyyG7o3WyK_Le6?(o3$|w7^n^RD=B$<0BuO6wPNrgS=aQlR#Zg{qYAd z$T_Wl{Czgq%f*o8Z#=t0_1>%^_WgKnQpK4HcPr*(^kwXB(9ZpJ==%#hVO*D6`uQ|P zg{TFXv!z|*dT#kLrq)}0#AXJ(iOO4*#hS89Ew)>MWeie(=5|`+#J)9_$3{_ann;{| zNZ>i3xq#*}!;L}zfP zg}hYW(yZCeTy@+B^*l?PlsidjOIgwSo|=r9^%LXXf!DfsMM+mI zHY{}X*4=JX1l%A|Vgfh`<_Jq;=K1;gVI5=p zN?;lR3baFW9*htil+e@A90uBs#3i+gD?qdl0gl)?of&c)WoarKT04Ui&geA+qmTE( zWYMQvpr~O>8{1w6%{v8=iU+?{L%JNK3F}|seP7*;6(Sy2W!7((n|urICl>9bR)MkV zn^n8^CB1~d=gG~PTNt*O01f6*lbO@Ur)ff-bdK$#hd052U?WH>+k=Y~6W$YPd3wH6 z-jz{0+teLFARY=aqk}lQ_MKlA5Is78F&0JNfekEC3m9Hcb^>keJ`Qh`7y@*e9HP*n 
zz^5Qx>#G1>*7r=LDwn*Q?f#w~S_hsUh1c7UeZgQ>wUG(xU@?A-!@1|t&9DStxoBb% z9@h5Q&#&{eYIQXnH2b%1M&1FPdrmMYbgFCn8X#z1l(&7?{10(59<6}$(`}qT9c&GR zsfSi2AF96{TKW2sJ&6{vA$4p&z}!R{EGHX_e6<7=?xoxATT$t_pfdqhT?vrn@cf8muGTn>VS|l)Biv{PZ3Q+?X`1# zYpi@I_-OZ@rtM2D>muDS+ho9IRDdrrF)eC0ntEpNw1(;3pG9Z+IUVZv zNln`HE9hFAW@@RHC`_D1v1-!@8t=_IzB^r_uUo2~lG!SDv9Wb@LK;O1F#$*)V3w(fXp~jFKjqM*z=d8w0EbymBxffTeM`u)xQICcM6hQs zbH(jzvc2Zreqyyv!{-C(a5WwCL8PbKZZo()CE!#8sA2CVXxVR@A+9XQF&Xw8IBl?q z*!H+~fk&*P$rVwS_2&5xCpgd~hc#t#`I!f$By!68sD$`kkW3B#1X5USXE$TPPmW)$ z7Dd#dQ-0;cS?T|(&9rKxg(m#UuLYh=M&N8GjC{AA7`OO6%i^H4Omv1N+fT@_6|+cO zdYs)q)5s>}l85XOFousldi(c+9Gy}!pvA2D)?drk&?-R&18)-*Fu^K3HqIi#8F4dZ`$g6@2wZL!@`tsfR7k#>32AhAm(&m-Z z;+}k<4)pUy4{u@8=)?HjS_X|puVN;1TV(~9L7tgkn=o%_3~&6(0QsHupq1GNazL@K ze;5}s`#yQbq^2eD-s5vLcg@Am?Poz|`?&C_@hhdpPwq=T2vls9ELY*#Ky^vWbe9;> zK!ie-uXk`ql$Vv#8!Qw_rUrs?{lFUlEGeipJ}NkWN5qxDxoanfY^IMp$l6B6lhTIX zdth?SMemd**~|ly;ehGgIGrD}ADB5pa28Y{ivjbR*pFKn$XS4iDggIK5|5!`v z{}RI5xg(^&6`P2cM%hA#|G)3M8T5Ajg|lME8zOu(AVaHT>?^NI|Mxl!Luu|P&LIlMl;+7L9?KJ$YNehL#wEF)@ z)>al)V-XYpt|Lug|ZK?jkDYT&1o1A${O0cDczrbzO;E7Ib`y{B{j%HmuPqVR?<4c z@|BUTHu0pH@}k))pBB+TSuiO>&&&|Km5RiSR5~76t*C#Rg?r{(W4Ec++vlHyQB660 z1TvtbxMIUUW(eQI|KZHPFL(z_rhPI)q`;5ruC3+#e|G|FXZamN71%^Z=-i*<)vOP4kkx#bEIi zowsQN*)__4G^9;_Bt;3Z-ks-p5T{MU`t)Seb`_USE=o?7cXo`mk6}z&$@Iy(HUD8@ zXwYI9XRK^liCRO>!~HQyZoB&Yk!(mYR#AIn{-TE`y?!nxnOm=%5;rrK$yo2JLw=Ca9tDrb$T zuQwV8*zlmmH)T{UxBu2Z)oPA<)0Y6F>JqS?5Ux(bCT|t?^!cCpLkz$3);g^OYp0cA zuK0Icy3qGQyQL;@&X7`+Wv5>BI)Ie+{<4^_me;xa)@NHkk1l|(ZTy6<9lxoXU3=>9 zYr?mynayU%QRl=i;4&>b*^&I5wdJfG)dutR%-L3vkTcTvbIKik2#UJd;Ipp8jl4_Z z4GEea){@B!d6ZseKW4;YJu1%P{kG`!zpJQ;NLqn+v+Y&GPidoV0r54R1%6e|=#JYh zFjs6JS>EoDEi~L(suF7lr|rEMjCYImiD|kK#hDnNUg>vV)p#sfL#4K_95e&=)%oCS zPqSf-0vSO70v9lha_CIFv8yqxuMXhj$a}?PWJJhrAOFwGm=i4dN(Ijwij+i9IhpFq zYdaZG4PQvawLntTJIg$q-=%aYlsK$0Di)kcAYJ^Ahsq=2=frbL@E%1Oa-M)2FW1%f z{6W0bt2=Jpee$)^-rs>Y&s9J9d?g0URUo719nzTNFhfvXvCX{k{LPFAKVdL;2(^|W zKV`!sf;#iN+jV~LdGrsFBqcVIUWL@m<|p_>8-62iB-}{U8QL(9=aL zR@P%Til4;uRg6DIYXz>{$yj_yX?ctXa9Ic=9STP$fi>sZdDC80m~DUJ7ND(7Q)D)< z`1=Ku{7b&sZ_Y_41)Ccdoila@%?m-lj3Upc%&d_ph>Y4ZFA&cj5*3;Q1gpM&`af*0HTh^aV**bPkjfOM9aD$DKSL%qVyAKcK?ozD4cp zQ25&3O;t<8jh{h-VyGv65uERE*tT~tm@6Hb&aXnQSd9iqFldsPgX+^No>RnF`RWF{ zp({-@l<3aou5|j0!k@fyc?Ww-oky-DeZGGZGFiFp|IQqI)SUQm?L!fk_)8OWb$(O6F z3C=2^_79^Ujt7__^6X5hVP7y9ck`VO`OR!b~REnj0G0qrqhLUoVsH(|wAiYo5} zrT5nSToO9&iKZ-iB+qUPN{3-hAChXjD`8ouVCn49nHhgCV}6qG{R_ud23WTuUL)0d z#KsNkmBuJ4rcdIps`hxf-U1BlhR*`-t=!rBV0&=@*vSVG{96SJ^gdB?K8E`up8<0< z09TaZ2j$27*yM!kznq3p&+KJ$kNE8UtupuF@#hYVudj0#yFgmc1xZ(=SK9a*VzK|i zkVh*HMz7qDP~#^5aq=rW85o#cd}Xo;*|}dc??zn?^M_gYFt`Vi{PKN~mwph4HoHK|_)DuO=k z6XXJ6catS>iQ3szY5$@>mk-H@JOEj&n=EJm?u_iNVFj?Q3*SNP!0d>y8B{h7xJ9b< z0UssM6|V$M_?bhbG8!;ux^Hk^H4CnkBz**ofGx-fs?yyqKfm#VLwEe1QOmLjBxRj9 z`T=punGFms5HjyIpC;sQf_t8P_>o>u8lv=hYqI>%u-OXd60`zqB1|=P#;-(Mci1F} zt@KNz1orT41Lxjf8S+U0a%&)**-b*Q4J1@!t6!SWnRu^Q^oa!htub*S=ShZiFjWCt z&5uW36d)UGqlauvD7NrO&p)1T4N-vnB^NrILnfa@dzbBpDZc@2>3(a?#2W^mLWuWo zE#6>HH%)3Xzt&1C*z3}jWZ5`r(218>615HS$D;r&Of=-<`}rID8s6tU}UX zzj5=Gj$#66msf(KU9f%DyEMRW**5JsZ3iA(iu`y7hT>7f(JK-;!ahC8I^>@LhY$YH zUTL4mfoA%RrENeCU$oHP-eM8q7Y&qi7is4B@bN|AhTrm~EMWY#f^D`V-jaQhM$t*T z>Sdv3tXW?3+C6)?EwCGnX*j+vLJ z6OGJXc1?eueEK9}&O(GiPZ->;Bf>vJ(l77kHTrr4B=GlKetHdZ{KugY0of`>-8Y*e zK}&~oScSuDBN_7cU_^k0dhbH7U>C7L9IgGy;E z2)uL6f+q`!HfW#!@eov-^Dp(LF~4b{Maz!|0*+=Mu?Me)^lGimjtbS1}I4RSxpdTK9*r=PB&e8#YJh3B3)y{ywMuQ^ z?N&_;GM-{;=y?Ljoh8KQpVNEGQDm1hMt;~Omw5`eF(K%xlp za40D8et22}6HUcxD7)wdQYp}W-`{HuDFRQZ?9)TK^G)5#BWa{HH*bX#yNPps_^tr- 
z4Sc7TO!V;2O4;@6B+0JpDI|cc&cXEv6l6$*ku~kp23wY`0|TYE=K;iN3TQvs4cx+z zT<07>bQTY%X3m-knuNQhi@}f1aMRYlhm6pt)K3NOR0I3d`7`AcbuQ&?Q8V0lz7am5 zl*Bm({hi$f?3dPCzyP;GW+4!i`$wwGU6j`LUV{ZZyqheM60F zNqcLGS15uBT=F28$wj)ZPm*u%RDhM(UJ<1LhzNSH&fcHQ_k`-{FSol7btR5=map(_ z!nib~yY<7#r8^XB`=)@pExsg)j;cp*_PT7IpCIRBV?Xwva|s9FjNd1y=SCuo)Iab1 zB<5}Rs_w+QU7nL9f0VeZqBJQ(MiyRh^-~HEo%nMHO>b3`5DD^QyM7X|BtpW!e|Pep z`}+ydbj0N?50=q=w3DecIn%SEnRFGkN!n6W+_8)5a(Md#EPc~}#S;fdN0Ce?{wdc6 zRp!Xl>*;#k4R4(r&^oigiWIKnBwTxD-KI~x~v>v=p-?u zl%_~H6JQ63E$R@g_&;nXg-B}7{~11o+u}IA%Z@e8v%n<&$1G&=e&>}gH)?Zf-=+^h zC1fA=PQL8SM?hvJ^PpbW;m*zTg_S$r^C92AC)PHSS2l66MU)>-HLD7C9Leum9EB-8 z1~|3##S7hU$K_Su&dYJhA1_upsRs{rS$ZSF0jb`yRa&_zpv<;qppf^t)&)D;4>p1J zRL4@!EwPc6DUWGO6~)&YVq41*FpKu0QURY9QVa|OPLkVOA5g$Wd*@Uq{k`w_%e*mp z6POe747)-<5pyC3YU<*BKSTs9weA%wIfrM-_uL*l^C z`%-`Ur3de@&P93rXJdJ}+BPY-!Je`Sm5Sm6E9QQQ@XLU+6~uWLyN*KY(~lc^Z@F@< zfu##an<(`^iF(0!mfMM{HitbA@{B|c=847gU6!8*r=u4-^sy$ik87OJ!hRBiauJN6 z8J!C@Q|X}s;gQdz@3)%?13m0gIkbXPgK0@M&ETTo)cmCB@29t7iR;2~zIID;K7oY9 z)kgjC!{^zX-Mk-vrWsZwf!Vy0^P*73w~ZqDDMp{|{(TUsj767Y|5A=QN5@6^7_w|N zooJH~w=Kq(MT^657wiSXX}p~L=~9?1_$i$Lyr+waZN{MzuE=}rg5JK2d=Sp0rC6fy z=ijZ4!6Jz^6tW!*oK;$OnLA<-&18PCxa4zMquS_^H2e&oz^7mu(@{#9&Qw&u)zrQa zD(?6BdnwF!6OZcJ-N^-m_cP>3Lg!U=FYUq8(&o=igup~FvN*sh;{*Rr`uh7hu;z9U z=`rhNvrB&n!j~w`q)+pAe#SJfO~ExtiG z10?}MMhPe@j`zUFKJe?^*2iy?T#iR#_COD+(D3y`59DCEQf^zPt5x{(&mGp{x#?M_ zVyP+xBazBxI86R#BO!>mT?4}NyD<=?0IWyB7nn+RsCHTvHJ$z|!rJB$-n(J0IXAD+ z*#<6>5!E+G3TL9Nq8t93d=zH>1M}X?tRtokqnR6V(|G(QL^li39Z*Ev1r;z?aOVE^ z45AeikElg%`7C}^2%5H(2}c*?yg7VfXrs6hI$5lVgAW?mxhtQ$zCs)@gNMYlgcbwZ z8z4KOrkOBqER676IMJEJS(}Yb5pO zhaxUKW(Tb0^9{cI`+j#)cFdT4THGlpH-UM}w9iTu$5|zGour{I@!*`_lnr2}3 z6$tJw&IurhsSW<_%QvuER)4O5ZyP11osm)^OGd+=&H%8r475_sFHi6Utmrr{VO zI`Z-5UC3;>z$o^hubr{RPxZzkZDHSgpGOpND@~?%5)x;~T4*TdN5Dy8AZ@c}>EHaI zpDp+?Plev5M>nW-Q29Kzd$QQ9Hv19FrbKr93TDze@5LL)<#@)F_tOdr5Eb9Pxa<2ZpxfwZAC z2ISq(*fW-;^4HL=T~F*nfI%{Q^inFRB?j0JKTJe9}8@`t+jP zzV-><{ez3fHYIfcD|Hg_tCk=p%Y&US;}A(w(0Ag*ckF7$Ts4hR((oDM&jTk`e2_

*VPM|}JT z>(cHR@HZH7X80o|f`UjRQjr|1NHBm}bsF0dr<9kAh-U<|FSYt4(&();11oAr9PFqn zY;C)h!1|KzWKAS?s@oq^A)x~MtMs{%RJ9T*k z8WkWw%;3S;jhE)|`-Oc~!n4;C{rx*g-tss1+F(vPw^t{PvnZEyvCYt2>4pArMxDi$ z_@AY3QuFkQ${r!m^^Sb-A=55XojuyC)iP6_5RixnkcbN7Xk`yNRzXz{5OB>;u%@8o zfk1tnoGkLKDA<*aqxHA=uyM3gb!)VR1QZ--N&}AD5DTS2%Z!g}TBV}8 zL(%^c)Y3QTm*n=n1@x0k+G$iy)8EjnFGV*FX)sFI*OO;`vCH*WIc4v>Z~+VMNI*M- zQB+j>(e_WAIY9$@=i3k0P5mjj!WE161K0gP4i>*t(;UjDxmR0tWosMb7HBJec)Y@p zr~YPZGF>sWRT3u082XUlSUQ56b4Sotfr=Kus`nD_*}Jy`ePDLUgBZ0TiJ`^k7Cyz} zes>czYBUL$O*1_6^-j8~YRiikKx|RiZ*`yN*XG6p;L%xZQmNm?)au~S3jHH2^HZ;5 z@Ws`6Vbf5!u*9XRnyU(nB=IZ5;0l;z?L-fD4t5v*e5~?#_fG!1_pb%fz~q-CB3$3< z4uONF1#4Z#bGOp=f&3ws=lmG9l%fQooZ5=Rx&y)QRL)V`gOjlnc6MLEe_rMhr7@2l zlKM7AUrDcCZP;#1vq6p1G`^H~z1lt{Ss?v3i`L|9S6>bIz{Ea}A{#)`_9l z1-I{=@Mug!W|0%mvj105f7)y4#vxH>`#yK-{_Ut{g_xmJc z2gX+anrz=l6reAzk=>t(LxffAXmFJs?D?a4u{$frW;Pg}CiAG7Gt?JTaH<2&GxDo` z*R~Hd%lP@Gt)io1?HAR--XS|CVLya*ZhmrZh8lksavOi9Ib5QPXe^mIX3(ENN#Rj> zvBv@%|D97ry^8!y(KCQW^Q9RbDYJVF)IS3-NMCBhZj_V~Tn&GgNZEGW%n099{e3Dk zfI$4)%1?g!-KTRi^1(V95NAb0EU89pe-!iHZP2eq&YXnideg8X! z$%$ExPnSi3ViQKWJA}RLf6Z6U=?%NV_3tCW>*Ei5+hfnvKG+BXH`pF*WPGKRZFyH$ zSIKVxrcUMixz+TX{l?Wl&^=>90n=caRm~9IKyviCYhE^{-m|zU(5oduQ)G1dsjwyj zp+0Vy;l1ZGt|cviB?uR9B$`TNo@iKJbawtBe(XfpNF*ZE)<$4qFbL+mS{9~Ex;Sr6 zO=;W}`ds~V-IpiEp-AiwRKWG6UC%|Jnc4C9jzQq~7pfLZqdV7buKK(1D)`X70T(PB;jf_ODyK}NCYg{J@+;8SR7%6xV z$ZPI;zsGLOwCK+flLW|a&4~1BrUFEVmA?z_QaKmd*T{UJB?z&TK0g@1jSkShjZ6dw zK{`52I0g}j*F!P0hYT8U8DeTG5CG z<|)Rj?&%zdxg z9bG|dG^UT+KtcOgZ9Q9B_1BJcNFMr=O@cO6Y+yUT_ev z)tzr%oH{q7{hd|H&o3M#(XCxqJ*{X#49h+Srxpp^0zhmJJ-6B#${dk5Q((YA&Udmb#_M{1e7CIFRdg9lB4JX-AQ-n^~Calu253}KPH zP(&lk8agddz%UOA1qEQ%N-YTWUxeL(qNF)Q_d0HyUjfdMvA=Kkkb?Gc9#x(5vUvIF zbLqQ1bmYX(%NHUkp`LTa86a@##+%vbX7P{H(%v|4W3AiV?!(- zdaXat*UA_Gn)g72v*Ur5h@OTFOVga1Ew5h+7)Ov|-NTtP%C7=c+N98S9MT=`P9SC7 zM;fx%#Xs)|%FICe1Wg_lfLSb6blm8`?#JMZ=!QMa!UEQPOHL&2qF!Q-%1$K8J7vKb zoG}_+lFS^j)74gkb_jH|Oi&M(nbd}Z%4)X}9Tfs;32tcqT(4@)J*2!oEUJ7S7gk>1 zs}x=H1Z~iMFxG6*^lLr`-?gi3eB{vXo?4Uj<#+JIL%(h8$b4uC(6L)~1x_arBbRGg zZuZbZrLE|s+5)FMfyZzq;FO>`_*vyH9cEb0$+Jbn$@4T?%dAYAzsvAphWKaXe*Y_^!NO^`Bkz z_8)_n*W+|t(-X22rqu)ih3VkLO9>AxrFAd6N@m2(`s{f~g2kahlu{{eT%qpnt0@vj zzmjgxk`CQc`eEJtm07$PfYGpbWQuk#QgFY=irK zz(JG4jFq9Lut1Cv3Sb}~6<-&8D&+8Ni5m1B#!FRV49BMrO2jN_fb`t(rKg4&GPbpE z@vQ8|0>#qPbZeUqm&@owom(2sbQmQUP>` zjd7~q-$q`uhEFLjP4=ZF3ggq4;bP{kl+C~_MfLbj@8cgRXi7A4IkXfCozXj%C?{kw z^Qdt1r>s&?>Rh9|LFT7mHowMw&=I6+S{BZ2r!+`hr`lEJ0)HdCKoa~njV*V1-b{;FYm zK8_^31p+e1GSJWTy}LBM^Lg?AVbVcIUYfCF9ey;d3t>a){wb0?RJ>l{jfG&nQGFou zOd}4kk_<|X-o?iPx&-!sV04^;p3D5vSiSiEj0b`F)7|6%9`3gPGeLdI>f6U6+QSO? zYWPsTLuK!m48oVEiYvhn6Y3opqw!YgGWy_$#RI;Aw2eq+RjG%0REah$dC&ZgW|Ok$ z@~GZDCK`SOY~#V{Sxtn=KQ0+`n!IDHw1+g$-p3`Tt z3h`ErzRoS;RYwk0kmu0meh-X_T;`9jkWYe^QWDsnhvcBnw~7}eq!Q#dxD?qaxP2Ed zAtKD2o%sM|2b5C?aGHuUz5?pPEFZjwB9wEUuBG1`e$-O%*Wb+T8}XbT7GNg`^t=j! 
zaLSJkaGv>ugz2}vYttHs>jWCGCsqlR4FILE1@}fY^kSgO)`wd_l3{kiv{I;v?EX=KxNbc~E_^ifvRbz+@X#)TUfAL(m-^N=|KRa-zf`j#*45YtCbiA7b9O`CgtJiQpiHKkGQNiL z`8lk4@*^cBWy9f0ilDUghCLJD)CHNkG!nmcP-4d_JEtxZ=L zL+kM@_7f=2V-Pb4JD{%xFcShhtqU*#-#LjgOdW(bP&+y~krU7&b*}Swry4FiW-~yo zp!wEX5RS7gh->vpV$5)k5590`n!6n7L&qs$H(U4Xe=nUn<?k%_qd4E#jO+BRnOU zj5wciD*>z?n`)n2YB`^KR$W5n{gbJ2WC$}T`aMMRA$|fl+CRr8$?WwtBCoYb0;cI; zA29hI1Bsq8#hK@TFgD)-7^=}x3}Q=&oq-PaBr}Ss23$?iM|d%VE8;e09Nr@kh$A~e zwVsg49`prAJYq_)1pB0q?QB34(X&e=e8~Mh(G>`=N0XMLyH+}7f zq5EQ~>oH5jA;q)D&A)!lm(wvYbo~BpeHi?@xV-!ZDB}T{g@?)EQ`Pb-q5Ue#%5KZx zR1bjP9R5Z$Sd^`l)ZG>>Kvy{BVFLx41j3)#F@08v=l) zt^}j>*5Y*CP#l2H+&iv|5fQl{4_^^TKb>Km>2?RDpcMNG0VpddDoSJa)`@NDia)-@ zq6@(Dw!jB1*9>cR0P*UJ_h#H*CVUid1MDO#aDNjZ3w%KC-2VhE0K0JOFBrli-;K(i zt>gLgxh>>=vV@j(5 zaxsE1rT{hc*w0T&7H~$F0U`7oK;^v!V8|sfSDUkV>7U#Zp%iY6BVLs%WRT}_+87nglEh^%c2E-eqdKad*Zr$D- z-Ti+NjLghE1ZE_mTSZ0$Bi(t7Z=h^ch$g0PO_rGqpfED6VU{H^bMqtp$IU+5A9MD_ zDkuN)U|$5-)5o*lz!yBfP}%UtZTpOWE4WZ1Nx;-)b&-d??B&or%}}{jE&4&tI0WXx zOQQfFvnD1cpPsUTKiY}NelBQu|w)#A_BDnPaGY2UJa&SCjx0OBtt-$03fPC z5JD9ry?vEvK}GK9_8czSdjTMV+rY9B2}HI1=?GH?h~R-5e(&X?g7kLaWQUcPmuHhq zNJ&cm92xllSbJA!IB$!sHf&DSk^v%4C@%_Np0x;lGr)=rKDgM{1=13RhI9zfVWM_A zi`(_`GeAwuVi5ohy8*#c)_+Afqu0YSGQ~I=GDnm18DuU+??Ll?U_3;hJ$tq8AX?w~ zb8sSBnSA=%LZ?xt*!c&lx^s;zDgCT85p=0^HKwA#B0bI1Eq`OKnNcL607_o7z(~N* zrCxzYa+~2(#XG~#O-)nIy*c8(ogD5|I*dLjHK~1Yo?bV=kVn;PT~(+VEG{173M%BD z^UAWioVYgwYL?riP0}I&Hg(g2EDi!{`IC(VmHV>2<82GfK_D)|qElvn<~^YkfnNV$ zD{~0JkoEATX)cE_xI6|%=pP!mjbKRt-pO*d$t&w)N?M`(=l*BL$O109TS`LDe(1L# zA`Sr`;=WjwG97gB)EU0EbZTyI&-C_ z0Pk3Ei-63n^ubvXm|qPCOA$B%l`7_(Vg5ilFs;EGxsz768UKs}#1^6K>WR;#m04Z> zx*zu$B7*?n?Up=&p?f2OhA)U0l@*cl(bXQ;0)SI)r}*)I)`DPqrd^t^8=8AjXAjvG zF7YfGe4K9b!nn-`>|Y?dLPD|=S2VX*S0B8G_>r&Y2E+n9lBuX-d$0T2lXZmx*_I^Q zx%s*6#qG25+#iOcE55hn$p@}aCBTuFMri(uGz-`nuKALL0u}f3VM%ch+rpLtn2(u- z+$r@I5V^k{(}M%WCI5Jsi~B}cxH%* zJXQy~Kpa(J*>R=RsFKxUugV5RPU%r&Dm7JY`ow{wr>wDYQpkmXCqBcTpWTd$i!*9| z!-w3fg$MJyup;v^k^oymYz%G@6Bc$&8V`gsMm|?*q=*J{AcZAd3|7&A3|9FXtwux; zLMQwhz=IMKFp&i4=+90K0VUiRDrIB#?4yw^gzI72IS%e)GB=tcM(gYs)9YanHx{<$ zI+&|lbdoWKOO}#rIS%3|yY$FAXeIjRdhOoK=h=iBhy{oeDgiH@0CY3kB=C+G?t?Qi zAO;@-p6Ho`^HA6U2_dK_c-3FzmSrQ30;eDOnRP}9xDZwgP}@iQLDZ#`0#}I%u zGK@wb@cFJjCg~Lrs;@e~2IWUAYS)>$#>-zjIyjIsW$a07i>B~ZZU2%}&&6J@HI+Hi z)R|vggJ;f4$~`t0+XS9v_n$vp^z`)p_BqxBv zrI})3mq67>ZBpW5wHWPno4uUT@`_qb&F0r*=cdiEV=jYs_5Qms6We?l@^UN{9*$2- zM|SX_-{BvAAN+i)<(2M?va<5e;oPf8hQHd6r!R-&_i^iXYyRx$lppt3D4D?@o z&`ry2=l>@4>%&8h*I$eLKmC%sl#4~wkaX5Y51hb=5)DU=N-P6bU;X{FYq&j}wOmpI zKA4_?LB<<*wfU86EY&TqTSa=`%1rgyV1t|Dv@^A8@hXr>GC>4#gKgd39cM>>rTMNI-5QuLb z?}-%aROP|uQ5rNSAS*G_1ovSA3NwXA+4q-BZybJ9I=rnNk*hW6h@gIC%44abhNL*8xc<*poH;feVxEY9-5q7oA&U7 zsCSl0E_Xk;J-;3a|7eQu)QGh)g62GJ`o`7rk(aiN;?9Xo&%^H<*Lq7U0Kf%Eyu~vOW*pdtBGhUTjP1w>6@dVf>OBVqAc`VRJWh?(g;-y2 z#;EpX&y5-P9wq{(0+=Y!bg+){1L2y^twbg@YZs{Q=fB|9Im;c*-zV#OAjLC@;|I z%Vun25WwmpTFI8p3`{3)c>DUnOlxlC?0Z`_GtbTc-J}Gn==op=BlWma%j&oxa`C=a zuqO`wBs)0k)aUP@Mp%)hng5)0O`UJtQ$|V8t8&Pf%yx+0xNQo(j)Ui6_>|Pt&Wy8N zPBXvF+Am+ev;mO2@Q<$|s5sw*2e-w%g9Tkm>F?b6x9}^nz5@b3Bo+tlv}#ple!!F6 z%J~tvE5g5<3)#Hy=hnyVQQ?31TB~ol%z`HPLRRlC)!8vNWvi9eBC5lox?yL<;__XK z+hUHvGA>`Kqy${Q56S8=Xg4*KyA#8 zCuQ(BB^X|sRBt>eeat)1dQknv7pwn~*Yy30P@hBc9l+jbwGC5+!Q`gmb;!G0xZ$Zs zqaz~=8`Xm#eLN!ySntIj!h@aH#b(|vM@tRFHR04cqckid7)sIu_KeK}577q$-pKcH znvzAORZ~+_kLiEeef0CVAN{B%yR59g`XygUAnt0#%A*(k_C7DCg~k8=9(KnJ!aj&x z=rZh#(qV~6;ZURIs)NiKe{!N%l*7Jzpj`!gAd}d<>zDSzzz*jS>l|pvnfZ-smJe=Fgutu$ETXplBHVHY(FcV)R*E@_4>CR62lvYE$4>K8La;{3#RG!aSlU*hjS zx=+nQL2m`|GTJ2kcB91O%?c|Gou@2i7N02GDXsLun1210^CdDt?(20Tk&gR+um$Om 
zw?4+Lw^Q^*iZy`2;`yvi+Aq!on)GpZQ_<6(2$u-lJz6GuWf*J<+yRS7v}c4EQ(_Wc z%58xNte`aCM{7Qsu8|74luCl-)g7H+xYYLUQhMYYP_W7}r~Ce+%6;?K>QYlvh-B^! zuhaZ1H(7tx3s>yuc9uONrW2E6qfpDDE3!D*^7Y>krhPHQd3MQJB~9DFI5&^U+4!eb8L zvJ8KOXjxsHNKI#qyns1dob$tW5&Zj%(+yjo@#;bFGZSl0o|&35IN0VrmZo3JuAUki zquJRoLKRtvii8Dd$$UVnh-D`{Tf)iB)7pWhe}by60m>IaGenbXov_gIo9wTY)A4gD z=G<36poruzl1%E&Mw_c^^QG3-ZIbVO?jyC{tV6Q2xH|r)wb#qhKf~DW`bI4Da%KRN z0^;LlKklbXbikk#%>&N^Z1qIYd`AKtsY3NFxc-cgE)~AIS#jnyY|T^B)BWWrp!2wm zn|q^azazm$9uUXgiFhf+_o-|g*!;?!d%@DE081Yl8eZHjzeY0mlUYeBSd9qB)p0aGq^$EPJjOn>5FhyR2EG4h|Y=&f~gDa}dv{qUOS z+=J5Q=4ri>3J2{X153S@mIKsGqtbM1z*5+=<0OW99L^n3h^)@fPQluxB$P~ELCpgP zMc4rOYc-H6@n>U0(U~s=^|X-?`ny6&A0V^k__Utj3rd@I60Z}D*j%lij4w+L2R=uy z;4HRTBxCSSX{yr(qWVI%WX`QiVF;10-QAdAAN5@LfHQhmFuXg&I zJx5trdaNDYBhi-wAj&y9;QWV(jHY6&0n{4!%~XkqzpA>z`HX`1Ma_2kE>x9` zPAIBaHTzJ8l`dA5Q#Si=+dj%AhLRzDf>yC*u`D%*tlna>Z*{4N63#x|Z8C|gFcy7# z>)MEec~8|rkWJ2*ODS3^pj-*yd2d4@Pyj6u6(OS;CnYv`Dy?R$7A?vr0XM81{S)bmUrx_zu^gcj0raP~m zn)zY9N@O;o$DL~f;G;mN?M|JIcd&;~AKl3BU zgDX1PT*jf>34QDdkK(?K#RO9IS>se;o5G=6N13OiR(j9mXO_-7G4Hv}j(@PsV_Xbd z6+bL&@(Fs58p{bkFFN9mi<>waiV1o(!G|ysVOn0vU%Mq{Akj7MIN9}{$Y|zHS9?ey z`t)mszlFp7%l)_|hVVCdvUNo1^X88YP~PIly(Y||+15|0={RIzxhB7I&oY{#S?w7! zA3~*9;y%utH$vG_-)%>KhrBb4canuJp=YHZa`(&kzS{A!T^7z%qj}!@+vk{vH%jk3 z1HD>pe?>NJnsd32M*`A2I-55BrH|`y%F$=}t0Va|MDkHERz~hc@3<2COrQy82)``3 zVYx%4HFE!i2)v0lP1{`GKaHCbJSghe+K=IFM@A$rta;F8%IF}z)Xn2By%hsHioN5I zowAcPs``$Umj^M+rtZh~X$ObBOyVys&F|qaoy0JdlrCXvHg%(iIftG#h?;$qUq`o0gA&BY&jbyfN&pd3H7(onfB>-XG42~+S) zbE%dG-Z_AU&DSrFKQdq1+pKjy__c2AqoPjE>}PuauT%+{tpI=BhZKG=8zqns6#re7 zy6)Sb-=f5rI^h-!-`4lUku^;|pVu;x0$&3W$V=4@8?#Bfc(U#P4K$}05f@n^W5i10 z4bP>57@o#YpHjHG2Cg$y*yA0@T_=N9ZlXH4)8%BVi^U&?xkqFay(^WlXnRVu@LnC`K9((s z_$h`qQU3YgNbn&j#vC3_9mpS`-Ba8=U5 zCRG|vV;{Vzkq1@D)UsWq>ZfZJnJnRA zymxQxR|mSNbx)jnv_vwXtc;#?ZLW9^emg|bKm43^@JHbo_d5`y6YUVjpp{&Tlkvf} zh=Z^6fI2;CqJ@?S46aMR+HJ60a0Zv}6~x@Rj}D2Oa<>zTfXjJY-Om_B8+Bi`Udio* z10PZ&LDuEwWvcp2sy05?x!Rk=*r$~3wOSKC@LHhZm@KnVW{5hI7U<*3+i;bZdh0!R zNE6){aC1PS^ul<#eVNbcOebYTGDZEl|K2Ul^Os_0SRf(n>%hrxI%$SGY7FC~d6L-s zprFoweCD!?Tp6X-Mn>Bq>~C#AZ0WE0q?fP87lP1H-?hW1wJ@DJ&hA8Q*ugImU%J+b zrSUr$M5$u4U2$SV*;Ou=QeD1mV;DS)M}WG6Vmb5+G->A1YOMy_sNUDGBB z$yLK2;$o?Wa6s~%4jT43$>!B6YzeRv}kymj!kK|hiweBo?(tx z7Jox)#h24vjFKz2M&KqCeQUSsL+8bAp5J*qT9!c240%YFQ)jZr0W^svN$T>(tN8KY5BJFaWzf+m|8KU9-l3L5Z zCDB|u5VyaWjVJ37>N_%&_ARMO2V_E&hUP`geMi94Xt>az5G1!iflTid>&| z)I9^93|-E-eE*29_O!bfwxW}qeR!&&5_0k@%KdFPotF6$xf7tQwERk^ zl|DIBfu6&5%)WQNVVnvboIBE26y6Yp4m~5yXu+3I9}eC0J1yOvXb)!h3Y>9o-kevM=6{1v z`JubPT(8MdBH~iI6b2tJ9>Yc?;Y&K9YwFD(8qP};Pfuh8L?%s4c{UD1{JU(JTWQ`N z8D-gz46TJ)Ff9o0gnLVWbf+5Xx>5CIm6v&s<4^F*pHbG5KCq-83f?92-;SlUQl@(n z3mR2*lp(k%!_bU0AFrP#?dA;3LCotePKs)Yf-vjLvFaC_3vC(3kh`>qN;~!|DX*le z)c29$@blk#;=a>y$dP7xOab?ORUhjiR_WnU{nvCLz^4PgaOXJKHo6-)3jeigQ(L1x zfiVo**+$$DM z;=V!9`Bu*f{=G93bB{5=$;n)bPq3DDTMEUk|5^r1^IFKhrZ13!tXwuuMIMTN=cm0A z$o&I9ZDA4Pm&muXp=Q!qCbTX2UmucsNF&NEKCRk(nP%U>myAH6$LE1kw#ge1&P^Rz z&N;9iQ52^RoF5qapOc5_^3pMOrdkV~uhto1lh02>7V#|^rg!b6g3AJXjmW{Xw*!+h zHOX|-9P^0#n6h82IAf%*20Y=PgaE)&M&P|mk{VB>d-bm}v4D=@ZAE!*s=^1KwG*>x z=bafP%ny^!Nt@KJFd6v=Eqe8o_q;FH<)YdH;mzr*ukCkZD#iu4f?Len&yKTH+2ZAg z)hso*fISBxjv0 ztu-01JNaAE)bV7PvSbhu=`$u?s9GvQXKxtz-w~N=>fCBt{5gh4fz8vhRc?htU;?Y45nzk4(O(Sf+M=VwPgncq zLDly5-XxE-wUFHNfThh0u1YqTr`wQZ2d2WS@Co<-tqmQ#Qm!s<2T@W<2}W>sd)uBr z8>h~}JKINwqWruo+k6pAr8jX&oOO365$HR#ZsT8ja30q}Ih)@n)n_0+{kyc$?2{7` z(|%Ce9UArzUcbbGeMASJJhK$$T+NNF`ZqpcghKl=Vm>zm3qMx)x0R-`MLe*M?`){#-aXC{i}KH zbNrU@`Dc0)%eVKAi1RAy)hA$brPg9S?;nI~NWQ>{n_4od>Be_TXjZXw4N5d=I^YtL 
zB8>|%Yz)FTlT?LubrLb6MZ}7~w79KO9_g{(Y_p+=Bxj|I8BY__G|}ZmPe|YA=~aqL zH9ELy?nC{*8+cG61@n_35~x z*Q4a0{<{i=99&%9#%c&dp*)k13S+(S+j|RiK-E_`VY!I^Ha^B%y}WNZp%~3Q>Jyx7 zQDw*f()@o-MEs0Z^2p+%8@JY_%nRpXwtS%a^gAaXWF&aC9(mhSB%Er`tFs+>!}Q<$ zYY7zAu#8cez|h7Mdi4C1Xk;D&X9v52k83+a1!`$Yk0q@vEkgPJJ#&!O<+K8+=T}-hOtmY(Ml|SQDUQeoqI)> z?aJgkfs^nQ3Tdys>eEQ<#qxnpj(gE1h9<@$+s0Co#v&%oM<05MKL?AMiWDFF*=}+r z*Eg6-QRp2qjzvowM45ZoE_bY^hnzQ~As>`1{a2LW74Qz4f8F)}pMYOrar@2%=-3pG?pvA4f&>1;6cIj9h%i=Yn{W>gU3U`)@qvlD!H8Ck}!v0wEe(v0s z=>1|*M;*ziHWSWgd0V>WHc`~NPCm`IpOw_IA3}$}CN%9;u9nft*Z<~IcH+)!z<%p* z*c>4FtT_#j;b!}Hbr#q?3c4AvMU@Ke_I+B_#q@;KS~yNAg(8Q?bIPc!6|V;j07Er);Ituen`L4$UVTOQbSi(*CocUu8|I8px9T*BL$g zoo#()Dm_lU;3U46)e>aht zxT4GpCXEuARHgN2$qOop9^OyF$hj%_=2Ue)_2-9+PUD{IWPb5^kI&W*vVp%Yqfetf z*_$uc30TpV@(e6d*1XGW5asVLVGwm@EJYJ*se)82>5hdGsfjMP)5gxMlGrF5mZU@t zhChvU^8K42nJAFlS5WTX>82xJ^tp8@@71OA_c?LV2(QDcFfHMoOsYxkttp9|dnu~R z;YnY~XF_s9JP~KvD(FpAOwjlIqXwfm>e2b2!5%)bcUvSS-O_v(gX;MQRXE}T9#J0A z=OUoMv^F>(-s$Xk2_mZ`D%_=$@yll#>+u+WjSReZZS>{T2>%}1aPN5UY zxnVz<2NppqkDuR=JAXHjeFQZQe29q8@G*SgI?4}89PBogi*VL&{kzZT`5TH7SV=4U z^-6N^bvlVLY#-~1=+PfL zcU|$@{-Vj2=t-yYR~&G+fb)_xk{1$d1Jg$plbHk0(^V`>TNetX1|vg*GD65w$o`oE z@J(OWO2nRpg{;g$Gs)s(OqJi5V?NLSbv+V*ZjtBx_i%su^z&I_1G>M=4Qf01D1DsA zY->1ee=NdaD~J}cKx{$p+JrW*artDr#u4AN(D|X6PrVcQL;DbcQka^mQVF)7AV;{A zsng8W@J3^*R8kRVs0GpCABZ=3QWi^9icQ9wPW~=9?qYFGyPt3`V|ojty{B1iWF3Nx z!>Lt^s^ur-3WrBW&A9b7?kA@%Q#exMx*I+2W;}c~1xpYlo08gs$2>DTW&m~jcdKjo zMI}|Kt*UP=?{vF?y^)L(NFE=6m-5`C_(rCn!#EskxfC%qpD;d|H(4rZ zXrKNE>FRE77S<|wbr*-BGD)vD&(N?{GsiQ3o2rH_b6d~`!NN`Zg13!%O&)B znc?+lKl~}og)2l~gZoeaN*`awrvK_D^T81h4J1k1#3t>A`o(10EH)y>mR4248*lE; z%*{{5%0PUA9Ubk~e+bMXS`}8P2e79y0f=_>Fa?uWIrdy8& zg#}L1)$bJLCFFfBkbuOp9`y_`-LxtRR!&QZSg$>Ik~#@X0jYJn&6o!t-O z?!>CfrVDL%drg=@o;5(oqq*c;gde3KT$Up$XxsQNW7ZZTY5^qDn0tP&WEO_KG*G}luC9& zf$+lu_OPLn?0mV|Y7gx|t>AAOi4MEGcdG2OA)=2YR1e?;Rr=ngk^4yf~q^3W+|I<_YjE9RSM+m2I!_m zlZ_SQxErUalKWWb0cyuJ3BeQU(>5K*%y#@*{NQ+>)2(mijkL@yg^-IGewB2T#!JSB zh|N*~6^^zLF$Onpt&1{oLcgdQc7uH*^Hk3q9he0s^G^d-*h%g}wqENrTXStqmTBBU z-HP6Ju9MoP3OV~0l60-Y(o{zDP0vqZ7M3!iBw35lN^*DjO_4K+x|>=EHYv!B$b#HI zZjGoUrvrEPAOJT}FuAa&r8Y6a)ZyjIb}IqF-O2{>&V5O@@N0GbQ&!uVDjn>P#*2=W z>xZA!gyE-9lZ;#2z2x862aDV>$)74JrCjW2>$gwOWVIgL_W#@U3N&l?F?oTLOHaVD zBV*rw@i}i^>vkyZpmri#&hi_P@4j7~nd{h5>4z|k0A;w2Ict>%ZxHil;rCeh#pyCO6usWrf9Iu9PZY1JxgZz^#pp z+26keZr!Qvd{WJOm40K1Q-N~oixDwN&9dw+Vs+{? z-i&e|g(daU6Djet~wK?KB!kde$ulQg^OwjagYUB>T)5%&jr~7J? 
zjwk!bzFdRhj>_@ z#of1Lu1B)k_1hY8<=b=f27NM@?7wCFlnf@>8-HORaHZ8q=3_(d_H5j3|HFhUjB2Wd zpSewz!xaK}`$){zUm{0en4f!LE@u7 zx9*?>z(&J(=$ENs=Ja}54p(m`cr7X>=5A+f)3bCKLM|MOghVLlU3QNZtR(}rRNI_& zHlOk-T+GrsE%C*+h**u~Fa<$>Z2uL7nbIXwr72LpZVkR zsuZPNxXur_{_z$xjCUYKDx93X!S|3Wn*w&2@Vg}vKE{98pj2g~T>{SDb{whweV&jS zlbU%QJj_nai8~9N=i;i1Qg%B(6A!GbzM>*#J9J0q9uIe_kRL@1b9jzH{=ww>H?tWv z31*bc{Hzb`sQx2V<&DmOj&8rfPj~W(5Zel#>_=i&x=+8P;!ffuWO1aACtDDxMKb(O zVnKY+Xv#E9mh7v|KB*xzIL)w%NGG$)PXlrNm{YTWuR_TCVsI}8Z^vjptj6_?r z_e0&0E{}GAGvsdT&3f+Jp`#$~I%kbM5kO2B?VdF@l}rmbIam0|Ih&qshty=9lvp^l zM^|1I-7EY7=)$3V9m+F-KZx&b`Lb^La>lN7}3i5bUE@INblM_2Q)#D|5Xrs=i##ru>{t zK`5}Hg|4SA*WFB=lqcoa$F)ENHT!k>>clxDckn{jB3C+7q$TPFRT5KT=a7c|EhV(; zyy<%$h{KPa)i)Y5Hi>R5d~@0Rcqj23ZJ4r^Qtz+>x1f(+`aFj8Kipp(VAz}~@;r5&HjNNy z`4ta4jVYt_IXCmlea_K#ohS5R0{heNM?3777A}X`^%>za-NK6;kk-vwf{Lu6zS)^z~Ciaoeb}bJu;2t#9R(ZBv}Q z=#=B)w3oM~<*2&WS3J4aUH@V-m?D=>J;>mzoExsjB%8IHVgelKtJe}TZ8fgheA$f} zXNdSUmdItxDSaIeUVOgZvvZsUpRc~ujolyWw^tCdF91(TwCLnOGKy5qxVX;UhzObPM&_sq@S zk4@qyFUSkLpDm3^ zrm2r6`hzRa2+l7aQGcqRe9eZqr8PN;y|4e5{`jo>V#y3oY6r+y&gg|KK7cpu_}X5- z0e35tnMAZg4-tnYHxWl4$a`yJObE z31%?7p3Rpp9*-^acI$#d;J=f*L-`=co$t6sIoNf7)l>b&{q}OX-7Pw`5JU~@&0jw< zFO1(~er8pi`l;o_f%&|ZDwT}*!}GxBKYHqJK9L$jQ3dGj?T8=evyGrw3JWtxhgn%I zf0sb?ecO_)@hg$oszU*61Ec+a!`WzUg$@*uH07IsZ8jn zypb-n#Ze+Rdo@?awY2qjTgv=>UPP8;Aa4EID`d97N>0e0JD-JUT>%Eym%7=}r7pV3 z%ohRw%pKV9`e|((imB{Pr_QwltiI}q5nFusw#O#cgjw&H(s(OK;Ir)^5(-2543dmy z+1=ysdYdmrrONErKzh=Ji0Qo(gocbWregqZr?%WZdfzUbqSQr4?)dQA zRb`>eJf>7e$AJaSmPDz6Lsi!}|~zgfKNqWWYE_W;fOQ}zn6!Op8qE_itq}!{xG6n84rvl??$QDs5H1B}y z_YaPVrBzbvsFWp?YpzvH2-HyyaJVMyZ z3+0W#-Db(U$4=2#Y{<^Rat&~CNibbJ-rbdWw5cyeItZM9nN!V#En*So}Twq4}$ z5bpSrDE0=vA?8OLK)&G?@A|hL6F{pG?m^vNS7uZq(&rwbfhD_g-+-Ry&1U2_j~XC=g!^1^#$kWz8>_w;hWX?8p!&WIA-X9Z1or9Y7Nyd9k%(JsT6 zI`LcaLvB*zMy__L-o1Xs28pD)vUQSrU>CSb%e?A~V^cMEu!B)ot6qfeo9(E*T3xlK zl&VIX_%}q$gu;R<=9SL5A7|iqI%bWpbi38@R z9kgZ6^EWIS96Y2kC>~2v;}{G%UaP@GjyIYR){_lyYgs1p9FFS{BMx}KLw&&MY@`=j z=jY4&2H~bMqXC81!M>|D%hdAr9>wq-bstEcEm1bRU928#DmX#9!oM@7y=AY4clf?1 zU6~SR4kpWVYFWebIA?g(62Y^+WwXb{8x~D!d>tH(6bmf1MfzSq&-P!ZYKetA6Ey31 zA6D+9+{zQ)XfFeZ!vwCp{;XY(``)Y3`YD3imnpFGN4zk$Cn3YwfY1e;jEi^mv4;KL zCJs1BGENvex@+;(ry;C+pjlSpwD!*`(Q=tCLD+JmYRZ#jV#P|ZPQPBPZ>COcr3-qfg znx`AxX-i$hrq>U2V+mZ0QJreeabp z0Px>nkj(pppf^#0v9-~sdG^GWa98gGnz|;{$=GaP9vI$A%d?Az(8XCLYFfw&H#itx zh3&SXj>$9*?4Dx|U3gzJSok|;10Ghi`J!UryBz`RW6g%b#G!#^b6w%M)zR%W`NW!h~UF7~?+5e!l(@R^fMkbS-U|O`mz&P4vpiQI!{B_JKF@fNAq6 zINHsjnGecL1VOhSAm0{B3{{+I;=)Im5j^lrjXZCofmpq>K% zy9Mj1Xob8x7m4xlGZ5+yNR5Q_%k_{x|959vonP9~F~oTQRhO8AH`9TA#0#r zpH;}fyb;hv8Q%d7;Z_};2~kG{&nUrq^RY6@8oNmX(1%xUbl2YS*pDsZd?^e)7kzw? 
zz{938`W9kiKV04m`^{nt*AG6kO{T9gWuJ_-f0h?r^>ddAWO^|F%T%GtBZHXnBK2bG2kj z6uOcJzUz-4OBi)tvng<{>tw*NNeaY3H%*0zionQUpX{f!@ z?Ch`bl}ueb=3qIa4tyma%xYk{&pyW)P3U(d6Oo#58X3j@d{CrsU-J_%S;WPl~>)MK?WpmzYZ^wm{% zyNE-2Kk;F23kV$-B0z02DOB5ZCZFC*zE$n)Dl1(s$U*I(n0Z!ug>r8qRK5Q;Inmw0 zJ_CkRsTv2umpPIqeM!34{xfH~=Dev*CQ@E|2@w(+i)*))6&B8bh2L($rDx%{1YSu| z4B#E_r2Tn*SlxF2^uJ&*x_R z6*!rlc29o^?-<<|5O8sT{R+a1Qa zynY*a-9dFXMTZmb#q}>Yj-QL8ebI-H(L&Ry#^-=Q{if%&$8a{=%nOd-w@& zd5}v?%A0>k+@zAL`h(I}W$jqRY7Lxt zPYC>UA~!yrK~ddZ91s@Zu6^u$=lSCwYwra*C_33QwZkuo_}q@JYdqJgoZij}sT*|9&20Hi*T{|IUG`+qEg3)QNZ^AHOI%q6 z5Q`JSu(Pa^<|5rEEghH>8azDaXU?vSnugF7g@Y$%Z?|*v_5yCS3wssEM){c;xuz`J z2~h15Tc942GJ;Kn?QP^>7oB~Ek@h@4Gp}YLS+yewG~qeed$gv}<)8_sIborHC0gy( zuBj;X4kKr9L7$@)svW@AWUifsg?s zE%bOM>0Cdz(%U4rxi}G~x;Qbcq#z94?tIu`D--b^lv;%?Bx6%(Vn|PseIC(x{~<-s z^|=+?nR(rSAu?ZN@PHIz5mgOrtcT!A98H3Z6^sq5ix+%G;xuz95xj#L7R#8rPu$&e zt}l&bj*h5nW35;H>tieGU-3Gh{#m?U&*kI)1`afC#`JmTm*?T?c82kdRU>Yg5XXE1 zx|HF<4f&0gL=F4yGNpxHFIEOpJolM}9K_qjkLNB|i#_e>E?o?#=T5^O<*la*CCxe( z1`QY$e`f#i;{qn=Y_qS#=u@8YEsb=DC%pDJgnoe=?saO_*%_JkF7d008~#>Dk(2xS zkY2n7Jmqx(HtCYjEMG6AJl7u+Rmr#lg~R=7s9D3`B?YJUWmY_%h1x?MM+UwEDpr+C z$-wK!*;Imynh_UgOPV*bhTRHY{b0?1k#6Bq;H(Xezix1k!qK$EF*C|=AWp$golB=c z?h?oSmMG5hMj0hnkr(2gpK>eE7+B@h#H@A=r8Y(qv@#$TZh{kfi8oqyH7~B2%Wsx` zfT;*LnWV8#=2N%Z1(4u1>uBVnJ2XT%cI z&!@$HPyN0J*Ly28Wh)c@`;Tedie$B%aHR%_UNFfc z#dy12S#<~YH;k=>HC9X6ZPLVsQg6p^Cc57bmzfn( zOR(FQ-`~UieA>A7#hO};gyHA9v~Z8FxA`(k^Nt(Uv4+Hhnx3iA7_CsVce`YOBZ_tY zruff|I%_gD^}wC=rdxLd@lEfwacYaAFpyTr}AGQdFl*i(|4cS815aXebkA3b(a-w#(s zC?+FyHXP>hSK@}ks_$~}@w$eP?5;Z4`k z)vR92PmAg}HWuG;X51B;d&PEco#ARMXegtzsGQ`h=OJb+L&5Qdt-OlQQ0B^>@mJSw zk}?QRLC(|lXEX%zNV~qPTzh%{#SX`lJT=fDWJsQ#kBVvUIO5X z^-=(%_7|OC9iKS#P+TguKcx+JtX)|@PGe4dSg1zciL?BFsDSJu#vou zh9NZe$pBfo!!t`qO63m@o_AST@^(^{0K{6q&Ubh*E_m^>-_8R{SJ2rB0vV=ipP$pB z^?dPG8$81&1@nDwbUdE^qFcVDsiC zP5(Q}Wv|oA=bE7*ommV=@1w9?&IqT|s=PfdxWKs#`Y|x*0bQw6W+ts@9PGPK-rUpE zN7Jw;ps=g!eFY{q)R7A(&>1Kczq3;g1X51a(!Z7XVdi(IMm{^keAqO)DUA6e5SPc# z<9xmukd=va*s)X9Q)k!D{f|ie2oS4XNB0bbHJ`w{eSECuM=AP|=QmX( zsH1PH+Emf8(dHi3I3F_1r=}$1{Ec(v)lDUn)t+uWz2CJv>9q0mjvXd$+0kbn1vb0Z z4=t8T8|ifDOw5vSzO?x(JY_ErGe};ClpOmeCkGC`{+#tqZ()w;?VP`InNDQ>Ls7BF z^{odo`XRbDh*NUE@QZlQ0G9Gtv&4R0Sq?fwp2VF=SgM=jlwO8a^}UUmofDEkNgmky zo=)SO4yrfJSLlW~9?hVY{k5&(-OTW2#yiI}$c+$qN=!&;0kD0;Y?KmdM%`TO%C#Up zUmsQd`F0B41D3pUh=O4lc5kh^ZI@k7@G!UpWVoK(!8y0S$uOWA6P^8;xtAM#9ISxfYFMk1Ne(Tmpf2wwZ zEc`z=)>GAgpVhy9t^eao##q_ch7BBEASEu1SvyhddmnyI#Xg1Ty25+hGiPJfKS%f| z4US!LXWo#ct?_6$rnd4QrJT^vLe^8(xmHX`vb{x3-~%D>*RzO&X9E(Ks7eN**1 zIIh_ibBXHhm&4>5@L_o%B|#Bc!9wbJX*5j4W_{q8W_mulCWUmVr90^CQQFB6_7G{K z5xlPxXKp)^mBBSLnT3;GnJUcQbQp9wp=r0;O&A|#%J+DGs7f=`eQbQmLu}mJ>pIow_&G_<4fUXK z2iR!|@qOsLdlv)(fV-|EwKgKJj0a4o5*3cQVPQk%{#QrveZzgY_x@W<-P%O1mPQWD zPY5C*$4Lb@hz@uYUsk{g$a}?I4cS;DcuACRuHfS0ce~xGGF7(97wqX>3dS%?b#LMG zR#n<#Sa!VDwp*(fuTY%F@gw2e5HnyFkF+7#ILQv~2Lyfjv>|#85IS@28 zAmFQ>;?!^vM<{!gDv8uXUN>;dK$;6%vcr#N*P|-aaE@`L&tQ1|@;XqFKB>REk)UYt za`;k`3cj3o{lumItYM}z=q8UsLi3+7i=#r6bENsbxzFr7>RHyxwWku!E@RfT>cVZo z@YVrER_btXAetGkmIp(1{g+{|D-0DR08 zb^59xdcGc3QeyxkPB3cmqH{0Q6A>p>MA(N4c{b(q1dRBwju1fzVv8d=ZwsHnMa?bN z7Gu+5(rgI+;_BoJhU1!5lUFMo=Oq}^qlJVY+&qej0ewiN)c8pb{=;HfSqb{!{ch&( zdwCW)$C=L|;xI3y^sFs>%nm0l;L1d)E1;`;VM?R!GcNc!bZw&=!~IUh9h2fis+iLP zJ_$ZAxn0@lE@a39b^Dgei+UAgbvsCnIMvM#tZ<6|btXB^vNi14BdHhVIfNlt9O>i$ z*~ZInE2wx`7$#{xU{}}FG65zfxeqDE!#hM8*703SqT2IE{7EE%lsXfXnqQ6=+_7ZQ z=Ui3jljDD;Jh5HoU9R9j>LptAHoD2(h;cwW%_8rT;GPpx_a{=K>Xx${JYh}QUwE@- zf89ELUnr7H+I@dIDF0X<5?h<_1bvtfJSY3Ttu}TYp#C#`55NfSR$+rUl9E6l_HR$k zX1vGM>6U6by}+Ug(tZafYI}%#8AAg8H07JDNvQ-jKw082DJY@FEwUl{NfW!kC(;Ka 
zABkF5|LPY2BrE$&79Zh0M<8-lV)OkF>1~00wuVF9M$tdf9yD_r#7x)71C-(5zFeg} zTvukdFhAbR_fLiR3#Q&813$?>QX(Zb-|dwO4LV2c8fI6-dj9L;9u@kRHv zVIIU@ZSd9{ea%EqH@W9Q>r1Y(tSN?u?im1BUmD(>jzk4h13+;5YClsvWTh(nj{)*Z zA<>4QJPr~&smMFxe0}}HHeL$acRZrPZ4#c+b|VP-g8GPOLfJo+q^Wl+!};SGoJ>V5 z#=wDVCzZE%Yk=2Rr#N-2|U>COW7hx|e`&_9asiZ7Qlc9-tJ1qC4D&6fq zklszg7bp?+fx9zNa%}SrNV>3wJVGIgZ+0;z2Po_E&O=9B4{-*>EO1d^UI0$~lxkP` zi}$1D<>T(=y#iRFZwOnih@0q3KP8U0PeURWo6|4H^dVTF6Cg|6%;CN>wpP->U|+bcXPKm% z&zR6;tFHIJe32wBxW$JoiP;B;f}D{e&^TAG#VeqPqY+xvk;3Wq@ArS|hWZcQeC^7f zt{zr0fx6eqFZb+nY0^WZ&RA}ay7UpV1zA5M(-^i^qsVO`I`gc|)~mz2;7`fD#!n1o zO3s1eXGgzP4Q|YCLp(Eu*Mf_4dk8ea``fmxk+GA>?+ohWmThS?J9-Lm5S=&eluRG0 zg6h`0@ry=@DNyKET~Ued%$L7KF8}CR3U6(ZlUh0ozl5*slYGYMnNRKrLqd;{wXC-l z80hBE zM(9LO8kLwo>#>qEX9&%Y8b0T6%g?OcRw94mz=}h^L*DIU8~iB1hoIFYcu;;7BFY3O&a0`g(HArg7i){TYmcD=d1vs~H6$3^CF^ zgi|Q?$@$p(T93`@JXpl4|FD|blqqHF2b6K755nS7O5}V2kA{;(S$OfA;ZiVG8Waj- z)F8k@v&|OneFtDm_J4|j&%lno8`|1211+$_Qv0r)9YEKJ_qVYtReMA)1n zvKMObeA_P6HFHaeWgECst!_N0zr5MLqdBkm7p?ju?y2)X9RfVsa%1mk3gV=KBJ>eI zFSwk|S5q8YxdRT0^6QKfMBL~p6rU;XC+y%bJQ|SES5;C9m$62jDN?x%0kDDpe)X#+ zfC_`ELjEV5H)*eOXZfJ`$v;5qfA%Ytg*fIMTSkCu25^Ss97>r2A-J$~+TU+E?kwAY zqCQnc#E%Y=Emc4Y7S33{u9`wMc->;y%`=tzkfs{kWt|-(uTV!HTE+%eeOe|uyTpCz zi2!@;JOYUcNcC?G-uF8X4phE_UINr!S$us`e5Y}ia-o)l4QJ3&)Pf7nGrt7dCNAgK z;xrIRbMMv}t_5en`1i^7-HSO8CS}T*L03TbCHz)!DE-LxtC1~NH#mnF()r5+&Lq1S5^@aLq@ z2m*ef+XKP6r7vD4S%E%`jky+%_lORstKEJ4SXoU?%_tQtkl59y^!rNypC4f*)dZyXi~4xi^k}h~KBAimxE=(|_;r1Gkz% zFM!mSk`SY6#dV^0kS8xwES1$`1a@{=J-zJl8n#;cnLkI-9B*K-^(7!HQAgfK7A?Yw zY3vC37>lK`G5+XiRvFe$Jw5M1_IKu{!Qt;xWmAjtt@X9381B=_3MGZGDu?%4ea=$J zYYY6Q<>W@}SjkgT8i5V!>0##+5xJvPc91lGP}2yK#sP5@<=*W}<-5VbCm2%O9#Mb@ zW228J_SHLT;)LNrt?LW9NwhwF@L|j*=I9O$e7Uo8 zKJYdyRB?3Coh(V0lQn5=YnK^^L%|s(6CI=ZkSQuZFOOtM{oXgRw!3#j$>S7Q211Z8 zsTJBMUJ>3qZBVimpnH(Z#~PQEq^rc%tF52Lmz?riU^LOtFqJGuK3=6bkAraakS-(R z{^+QdZ3)=>HYsqV7VPH%%sKC!CUTWi^I8<%H_|3nT_vTGx%D;tuRYIE9i6E?w4wbXN#?Ex%WX&4S;Y_`Y+*$={zgr>E z++)AYar=C-b|cm;rP}nUFN?6#*j~V2ow<+?c#)t)AtpBWP!%Ug?y=Iu)I+8?3vNRL zDG7>*huQt+P$*7<9Tgzm(wtNw`Cr*$GcrT~nHa>Y5MLVDS*isMPeay|dVN%<&+mNy zm}di$92oIsFp(siyF%q9~uk&dHwpXalX#R1?$XNpFlY9=O_>)8Uaa z^&fkBHiVA#97ZTd$EgET&jOsCqJs1x`*d|>G%-DS1zj(E@TDBdV`}zdAYx`{Z+jLK zTxdNWhb7Y9oAH-mk)qNNyS`y~RvJCY$~9suXvk(Z^Q+PE53H6OJzOU*DCOcH0xbK_ zrBYN_?#IaHQAH*yywLIM1XNGqIDKDjwB3t*E=EvM(?+Pdk^sQcDP5@hd435o zpa+JJ+%Pns1JFz|sjTt^JA_AF!kOm}tsRjaYdpn+l*pIZXZuhO><R6S|UFMUU7IWms!(l2u3r?KSHD^s|2yRH<1eCGmssXH2kC zIp=H3L2&>8Gvv~=_>|Y8YfKoyf>qRx_5zc5^q`xJ4^yLbDQAZ4PJ`LIgl5V82;fVH zWs#`5KncME^Iy+Tz)4#BRA6CXrK&Wi(g!j--biZZD&+a)=v;Ye8EKX#eo20LF)jFa z#m;Dg^{K8LTLmPqrKT>c!+oqptzjm`g3LWAxznSfGj^QdPw~W9=^(yBj&vl}A3H#z z0u>0ZZn2)xx{BI;X}|%2Ku)VMx3X}t ZdFJe9n|UB{A2n`<-7G9!ODWQVN_Te(OD!Tx*Ge~nbSi=?Af3`7AT8Z^ zKYsteduQ$d!_3Bf-t(Tf&htDc7N(<0Oh89~fq_A+t_Ib|z`#OcU|`yTaDi90eI367 z|8T7}RiPMv|NWpkOH+VX@ZHp2cw%6X?)>}1jF=$Ez`zK@P=`Vc{pR+A@iHjUnN1v} zpGJOqw5=@L-b4iZ>i9N|q=DE#ILHSYYN&+oETrU;aXHXo$*r81Cjx`-C$Q*^t?;op zJ}4~Q-G?}q*ap673v9D(E3LHc3dp9b;Ey` zZ_q=R_pha#L6-OaWGtf*ff`Av=2Oa7(O=;7Yw2&MSh(bcBl2G-SvtJU=XX=NO&pS^ z`1S28-jAynUL}v)anmj(MM-aoY(nfKZlChqnh|?wUD>X!JWtz-Ce6-g#FH1qp6q|v ztxpirg-dQ;l9`fT`m+F)Js~_n}hhulY zaw2dD?uXnV#-@!g#sq%2rCSnG^nd12;0+ zET{yfv=E{1f5cddrKesi5Bk6F;C?mzVdNIW4=L|h(ZS*7ER*@)>q-yz^#7P^MjU&o zZ!(tZyfSQg{z#hMFXK>Td8veRQ+j0NmE4I^@bh8qz#q19CkFZXqj$sw$B2xG`g$3` z5tHJ*VoWKjxet!~#G^dks#SdrkW#18NBURch6>LvU)qN~F5-;ibA~-3e&^klHI2rS ze@y(&Jfx}t$}68SjV70$p=K_pQM3|~|wC|qxI-w^x&9Jbm@`3cS&eq}d zt$QoC&}>Iua4M#aR*Ra5e}A= zuk=3l&iwL7=$C)=Mex(GJbh#>RZ7Jl;f&aGK9n0za=bQ2OYn;n}LS`Y)VZ2eH`q*Fj`LOf^)%~@)YE}lH-eR=f>T_TH 
zlqwj>2vteX-zSyl)6mLN#wmP{bVLmbYU5g}!sz}ej~{`A$`$kT89BLd9v`u%1k)d# zcv+?AZ?VW*;&I)kjA4zvW6p%i^p1#cTYgpaL$sWJs-MEQg?HMrx&6BD9FPZA8H!$)IbWd zU869$n*M?eNrqK3sToe&KO~A6r6tf{hm^2``3a$w5&Ej1QzdyWJ4lhZSd}7j;dU{h z(@iL(>L1CTbdRtjif$N<&e0hT@rgm~;hUQ=iv0T>%e>>QD0~fN(pw0b=+t@1M`s=X z`Mmc{!_CZ?q8aZ6yknVd%!}@LHnoDo?YP|5H%&8phB*QamNLhz>lY*nUVQ2jvL2X6 z)(ZDMW|W_asukXF{x(AakG_38A0!L0r~E?vr?4*@*iYY}T>7#j5{&LYCyc*#wIEX8 zXL3Dc`-3)3*V`B677r~$TJobV)k5xFSs~>zvt;QrP5dDx%0l+NzDwECWVOzkw^5x< z+LwEc&f$WUGfm2edu7`PUdO}d(}bpCu9lLW*`g0V5(ZWKS;?I&ZniJxrObz>gmh<> zQX$;~Jh{=1*`2LttfjtUfzE*=>4Tbche1C(g71b#)GhJTC$#syFF^x> zWxgIlt>EL7Tm6g= z*3yEzpR=i^aIeLDNf1wG)4reWR$qf8(bgy5CbCy2ZK-3GbUK_La)Jd;l;7WE+$!GO z8em?gcWbf)U)Gz4iqDs}-wnEiHV^sUhj{NdWns?Bx*i;_?Ke35d_^Z&Mbl`1#w+R) zB)=GZu}Bmfc37lc`P{B|l&QgC8(JZe>^zt)?OwZ2dyC!Cq?`I{bZ2A24rv!!>vm%U ztX^ec?ZY<`*!*;D>`~Ch``9?#c@>lDt+bgd=N*i_v{dVmP3wR1ZPKd_Jx@V0&~O@bsUrR&h{YP<>D2+U%!jm0)wH!_$BG5U!>8 zHrErf7n5hV)=$?SbhWFmIhfGb#Tn;fN1JIs9Ov{zF z{A6brjL7$i{*_B7b;82^e-Wcb#BN4A!bpSsezJA;*1F|EP_qT0ic9E!!N!3r{>>AO z!^xefW4_XUX7a@2e+CM*2<7d926{UxI=j?}e90lw^w7B zsr`N>t71>0N#t)yWlv!kE3$t+Kk!-(zR)XFNi2K$HQKN{B)Gz$6tR{6&!>Am_J@ab zkQd}HOKD5Rn`da?s{%iwEI#XuA!dABe&oDo?NY9FuB|0evY4!Vp2QS->mG32&%Iq< znpg1LGB|Z9etrj~gqLnv}nFhf@}U7$FX@A-I%cDCKuXyOL%-Dh(x?uC9Xs|9aG zg0H>kBSNPE)KM-O_~d6EQ9nU4{hQ`oj9-^7TdhbpH-XWg<1f9exj7v+#|@4}z`GfiM2^zYs2g69bS7#-Q+@TxXy-6&2OkaISRu$B*xtoaa@fefCI$-~-;f(_=eR zmG)Q18x61e$-{qsvK^~>@hNJQLEMGm?DtsYQot^Mz-Fc`RD^;pt&cPE%R90P1xmKD z-a*qr;q~Q`uFn{_@9g*I z8ojPh?JLU4P$}HH`u6taq$%7wIjFBOB!m!r%vQ%40bVc|m6O9TFgRE^lr2ukq95xC zn3I*pZ>kbcBUpk%NIkmH>S@3EBU8kAP9#gl|0(c*ST3y$1$I?L5vvmJL>T6SGVN@! z)6)sviO!29%|^$W^V_+9Q%;;(a+&zO^`@RKjLq6!7nW9!dKJa~6Dt{q^XtRB(0l)# z*}6hff4HJWhhOq=zPxT~YO3Kg3zgTeU$6PYzu({RgRD z|Jm*gpQ1-%#<4}CLn`U}ldX|bz=`qSf8|JeLcKiJdSgW#Cc$)KLA1n-Qb=54I_P2d zU%9jWne>43J)WH?@U{|(d~eVDFIH;do~4gL10L*<7iEIrYCq2tr1*o6qMZBT>CH1U zF{{KM!%Lj!o2)gizmmuo0pS+cy)~Rm5kWqt2IZRKae;pizB$MLwY6pN>{%LV$|WJB zH5lz9G?mzhBu$kNjM|96IeCl95e=l)&Xjt~rJbe59Q4El-cQD^0uqvRTTvi;v_DZw za<(%S-{!NgVczETK_^#AFXrL?_7zQDsY>F(M!6eM$#|krL~DEfL0`iV2gnO`1HnVB zc7;!g6En-@L=!z?D|kQqE0S1xvE8@yVkuDfOS|=nZoAKZ84%xV zhxD!;5MB8@%D`b^TA~pcJC|=(puZ}V$NcPgI&XHDu-A=dQ~v2*pl5#Fdp(5{b}s6F z(W@rFGEDlS!($@_l_A5Nwe~Gb9i#!}h>wr&`$n(je)#i~YE@=$K>zKn3|Vq7E`xU` z2b5E6p=_6vl|!g8(cZ;rC)}@3#}%$#m43StNzzLxKCN}WKmQ!c$;t&(CG(wyR^iT* zVQIDi0-@M$7{X7OGQ9hnZ&o6c9wE{h@t3ye|ly~%B7hvlwL zHZD`Xp88bM^vn7B*O~#_#pzHB&DvV-$S-Pw60-LtsVFDoZUNb2s|Rg=mjY#K<1JSk ztxkcyA`v6L9AvOJdH=gGDGDVM*rX5AfUE8O`gSTNwxXbgm9h3reCgNbW*m=8Rc@4D zf);**&psV1M_s(BHa@lxu%25+*#IxQI8$@MSkUVWrp3Mhbts2WU*c~L=xcPdG{H-J zq1ZAGC`TBY-?Wj^KAMF$84WbQ0l;Z^kHP#XpM8_l)563ugLrN`v*pbO7Y}-=hE3VF zjEQuCjdKc#k2~gcbB#QL2^TGxCDu1KErb%2Rx@YCn(2VOp-g)H6J9%$<)Z=Ww~NiL zwQvn;s(tbZa)U)UOi_(ZvDJM&8m7Tgm1$Vo!pcjQI$46PU)pl?_uU62sG&Y!GqA4A zUN2v;;*|>x;7M}@Xp;+P)GIWCQPG+_>>U(I9;$pWV4M9XO9MD%iF`yQC=}v)Zzq1`mESXcdonq4s@$1b0gl2p?zkDmxm>HaZ!GBABl+mFW*GM{f}_)&ie34NnWTF4TCJTY_qXhBZz z_VEq*R`^tb&jc|W(yd$XL`x@IF0n)lfA9A|d&CcW2YZ|z|bPKA4Ccv!EEmTM(X_@r7<7CA_|^{6|GT+LjtyaYSoVo@O4njDK< zx`V9PX{*FgW)Sk;=~cLb*AXEx*4%4er(3=pa0bY+LEH z+1o3wr17CR{?op(R(DlK?>?J1#KX4d1Yfa~Tex;LJ4#)g1IqjFCMy>wL_Ajpk&>VI zQ=R?NbzHYiB8NmJ8n0xtMz@ql=$Wlf*Lx~vn&@6PjEp2<1^SmVuOuZ7iIcvA>i{9< z<6%Q1F1BnnKtCOuG5*a`%hS;S>t$zA7PVS7xmxbpw{?rhXIP6Tv0XEEyxdMfqkh6njvDDtv zq7oESMe`}r?OQyI;G_qzd8ws1#doFVT|Mrtn7W-j!K#YaIK*-qiWmmvt+tpW*y~QoNpLnk02YrlG z?zH)Sl(V!5=Lr-4C5%Nfq^P1$2}-6s{NcZrfQNelb%B$22UL5 znC+~*>;_m6*l?z_syHVpgXJu-OV_%CBY8}ExX6%7`WdBR!1N1?AYSiC!nr22@%e>OSOe+9xd39h&% 
z34L1kja<5wmBWaP$H%wXo{@{IuIBje0?#ib7)Yf61wPW3rFJbL`l_&KTc;Hu$g=4C;5(yT-~w+W@hqriX^?CP>F;`2B}2kP}SX?9RrHk=Y- ztOVpdR-6w_Y~%Z#0Cn#;`YT?c&Q8C6>$*p_eFIc%%N)J`N<@t~P_@yKF_Ts4K1GiS#-ujpGTn2-C>Oa&F5{$AByx$JtX!mG4z`7xuM-yJDPM1K?rtM|^u z5z}^9wSSOs#{Sjb)ulbCRvP>@SOL~UhG(#-h96~r2u$74)0)6U!S}3+)IOYAzzmGx z9=pBIV^9iCqJb|i?nr5Xw4efYtGGQ6t;r1*D&7|O#LH`_mz)N8eF4dmowS&>Awu{ zwF#jA*EM;|Yf^hvvinoG$3{OW^+xBcevQE??TTz~54kQB*QC72--FU|uxxL>k zjr;!nSGl^^Ljmd_<=f!WLFs5~{X{LcHGD^v4rVc&x_>#}A2G3Bi;N&|aq|*3G6xrV zKI$}(+bQE_(46t9_>!e#Sl_mJDD5h@nT>1wtRqpZ+OmjfZQXvq;_5N@5xx=?QjI>U z-1S#h@4sL$ZG-{UCgOYVwHgrE78)!vwYh5uE&QU~wdl{2^L^DS9GF^(yZGw+BH{(v zYDhVkTZuYW_`f2mMm??qI~X)pFk(I^A@Phcrb#lrI*ar)ShOT74Hg1;!W(uu>t3L>v=pHc@!rS)Me^FdhsR{> zs-quQmHG@jc)>q;V!l{$Jg{d6&-`FkGNQQ{FE*#p7uzweAbXcF@Jx9fUs)urD!_E* zasWXfHCjgn0snQS{Yy7hP3+4^g((zUjHPH6uz+zvQ(fxsSsD|?0a%wJ5prvcL_wJb z%;U;>9|GgIvjz*4KiQOgI+ipAFpGtkRiKW2{xRn}0uW4S%9(}B$ZF1%kL!r0{hJ%|=aK}ttnR9bd-T|zKr?l0GCx|btKC$n%DH8459|pr18b12uCb3GzFh@9Gv~0fULF62By&T zJj&(<1@B$XuByBf}3RinmLxuda_svA3h;(3Zq%dcJp{_(<+ZbW*x=+~*LLfwWYD#^M1P-K_GG<*zoV2)Uz_p12XrnsFoeLiG4nY8Uf@ z%1r#R(C2HT%c8=rB)Mxl#jUp|WCU#e{g(^?yz-nTrL_muQxN zDVs|~b%!Nzz)(7zsd^zi(yfZCg!4FM-??YFesd#O>YePc=E!_p^9AWiBUoSR54CQWuu z?XKSbvnJ-{r%o?b&++G?xRh@Ej@W_lrN65ze6W2xPYToh#k{g!`3$uV)4a}n^W$Q5 z3U1oCel^IW&7t6@%LVDpT+Z(x!y5lenE68BVW(k8y^JF4fB+xxWY4hc-lfVzjazxo zX)V^3NgkI@qyn5NQ`=p&CcXD$G0;nVu`ltj&|5jb?<^agc>0G+ozPd5Sma~+Mli%U z6ntd-Y*!iN)rlok$#o=;EOml_os5Ik0G&~c=7cyaWqdH;49ePHUeV5A%=S)y0(cc1??*}hnysWt zsmJ~GN@#d$c=--&(%oM{fo;yu$=_6}0)%GQjI?&QS9ro?vS6~ZQt-^n?L!1kwcX!{ zG)M!=f}i<=>S3)CiD-oqiAa@pBIV1UC>Q=VRm_qvMta*`4-%XR-?moTe>>$VQGBWo zi}v?)q)0^{A0srJhAo!1LB@iL4P_DJu|w=QoNQ_t_bQamS8*E+?h`7>Dt1e^{)!mN zK&_fJlx;bSDq0Fqclr@anIb{MmY<=+Tn7Iha(Au&7xPZpkaA29s?Ls6>@z^09StC9 zTUVlpf3x%NZ;AAZl&t1Ua8O^=p_u}IcscWw z)G_=2BAYl-P!n^SeMcwm;=~Ka{3WCeIW@hJ8Q1&3@;Y_tYIiFB{vd}EDIs6;_Pn*t zdv`oVSB~dCt5aZ1?J4^i>A_I0>y!I8kNo_%b_oaks%Xxziw&mZKLfyQamAqO@6ETJ zO9bfAgPq}UZ6QY~T(N;F0;hL+GZGbRWIbeh==C@^N=N2&Ur-#{$IIzOyTYWqy(udT zNo<*of(TP|m>DH0;)c3|iWG5TGP>mNI-VyfIxBlaJXdJtF~W7a_tkrQ`=y_6Z6M4nD&mE2M^ReqV=A zuLh`6LnGYHsIuS-J*yy`>=HL3`G*Zjm_{Ae{Cq+4dR>=m@uOFUBKG6ZphGxqLD20H zdTq`jR7=UJe5)nouC3(&fEb=qA&qgvBLWnnDn|0w^aMbv(JkoW*f+VGy-%aI6Dsjj zBM3Q^1vAvbf1O6Duhl@APU2Ok-DB`4SqkLm)Cpz7yCZk0##Ynu(kjiK0tBS0DzhHf zp#bp(UKmk07`e+x=50nziik1Z5%t_qZ>2G}|o)Da#B88(59x zgxNjLx{})0he@o9;?#}~KdK!cwy%Y)o<6s`8r|BSD%~05(-kkeFzHsXSvrK8rX_r+ zFL>=$ZH*UfQXh+WGgQKrB>(&Or0;feb%Vt8M5YQhlw zcIBx-Q*G`dF5#GrUgSxKzo+sJ)8q^1`G2V0Ue7nLmL1eYyRPfbbZ0i3#1hq7t$BOX z0BRW`v4tJ=>ba0N;p&4r9_)>Gkdg@U!|>m~iuzNcT22GrjXj8(0MNK7aJWRTl+wOC z)OK)iP$>eWNseHaXCim1KpDPGH-f}=OQc`ZMrbdpJz$MyrU4tr#FiECwy?}jda`Df zrFm`*j{y*^64sy^jBSS?JUT+nr)VYcdb_mGlhofK{Cj0h4fWqb3wV|kCwNNAu~w)C zzXlD!D~kDeBdq#yGi((JT@hHU2@KWw0QNfh6i4Nn%?w5@$q5$r>MLp!&=0*U8b&rN zSuFADF!!T&bTxkeIKQvadR}QT&1qe20~5hDI4Rp@!t~BvKSJ3Ny?k-=WEEUeMN>JZ zI)wDLt7vNhTTo{;H;9yPd_<*XuXB}>%@=X3 zE!EONpL#&6zIE}VG+tGFy!sQ4p|{1C)}U1Z4p^qPQE9WDQD4CI?TS-CmHXQLGA+y^ zvXtQ(P=P7Nl{sx`V_`hV2Ni*-vs7c( z*rKg{=?0pK&F_--V6Y7o%xcE!qMAS};LW4~axi)VSy{=lFZDaaP9`vdt(}qnp5t*9 zhQuCHT;u`VLi7^7^m=0fk++`jxAg@u04T**Q25-hn>n10X~D35{`?sc=@W^*9EOqZ z8y?2)m<%#X`@kJBkILetUs1bhzzR^E4!hwIJ`Y#p#7e0}BND{m5!YpG2EmpfgW7qD zuYq=bMe!C(Khruo*!zkm3=nw7GqAJNr&8(CPmg$ZMF&nQrSW`?eTnc@_o@V5y!fPt ztEZ}E#7wgP@nM{*=nid8%|j<7zEhfHkMC) zju0BG;~x)|Wn54d^{Iy5)~rTUBSue5SJEQ-BTYV`UKQn_dQr5F#GHsPXz4Um?`g9+ z1mBmDXh{fXgVY9s&noF~3SRA4kLlfPdFU`nxbbK8>j$+QDoCtob@G-71#)ipg9lP%iV^h%qiN|E zlJfrkm9up&az>#6?*;aT6L_0lPcNkUL@u(hvz`-Lql1euhhlK#;@|!@zsUi%Xzz^P8UvU$UZ(M9L*AWAYL0kuawxAMBsK??yd_M5oXxG3E3N75 
zEn$&SaR7^A;e~LpO#Wf4P;Tcl&Ak={KG0D}p!;i}1Hf`KJ_Z8lv*MOYAj~p`SqN+w zp1N5ziD3{80&?WH`)87+WVBkz;CD$xdVc6^BQC8P zy+|ewC%gS91v29}+RxD3^W(iX6*3;6-~0s#G`t7scW?fL*q{*~y6&z)FJr+Qf5^TT z(IDZy*10Tl>|#Y$Gwr;8V7_h*6E&9%v$GLkW<(z!_(;}`4flCf1 zOkUIjudYPpk+}052lUZ_vO_E)REgy%s`upV?^?WI^pwF+B`|%}1)B34 zQ^f5?`Ro;;Cc9$9(A+*D>^9*u&T*@<;~KE@o%@GIp&@B<{38Su$rZ96;Bqx_2${H{ zr-SovdkeEjZ`Gb)`P;qR{rqtEYDZv~I=pPXBdZYsIJ_38cb&*cMvO%3g)VWG=MSTI z+$B}gdPm=08rFHr1)IHB7Z;?7FTdX5+khZ>tm_S-ES1BDMGkQEaigJk(90j&4w^kj zZVD1hG9r?2b*~dE44PvVd`%T`dKU6**ZGd#XV#w7$OJz!e(=fHY1qMRojK9)&cUH_ zJarozIJvz(7Y8C4090UbWKc?Hz!~Kg5706(Av~c``g~3eIEtE7Qaiqd{4Gs>au$&C zgSlsfzhVp=|*COjPkAVG=Tk! z(WlwQ*MK_*Q9DBxr$%<8P}q~NF?X(}b|c}H|E~|5?60_tLfsKHdGpUxILsP(iCl{G zfRV$!#P5aGdV<;wdKx5#D^(XPle^gdUQV_%zM{quc4nK(_TJ>~r#((7C8mG9ii~x$_fOsm zDY4~fx11VwhSD8E^gF*C+FtB7e}z6j6Y1hlh4zs@x^BYG*377pM9;&^-aUg^e~MZv z$pK^opwPqW`6m5mK53+>-y%Hn3M0rJe~~Wsw8jwAYnZotQ>DcxXM3;#s6{_}lH3|_ zl)^4ZavKPL0*zw|!ZRj$l=+Gtz^6c8B>nza*`bA^IJ%fcLkp=d^W2DL41#sqdGI1g zmxaZU&=S5LT>8hHpWiTdaL?hemDhGY`}6}3i}EnZ25zi0Ig}%5&{3LZiw%G3cEpae ztID3V9z@zCAwS33rRr_Y5hk^MhJjsiEi-MsmNR=ADR#S4SsRyovH}r_h`cVZHS(=z zM;Bk`41B#QA7Oo3-h~I{pA%lfo59PTg*~rqN=}$mF~J`w8Ii994G?;rtk=V^j%*xy z-0HaMzqxOgsX&`Ov(Xy>3ve@&nlT#zud<)8$6$B0%Tt> z??|RPGt;8tb3q0wZ~T=YjQo5;kQyr!VcGTMvkG2wNHb!nu5(x~v@M`sXVSd@;m1(> zMWU^hnU-&~^6QDZYuHWv^h8JN#0%I{20YyFoq-on(16TW^CdXt7e+k2fyUkG@oVB; z`}J>$){D&0*KA}Ophq%zkk~TPk_J|4_q_YNn^?0{xLVf6hh7s64z=evocD6GmzX29 zpJre!Ea*->7vx>rFEHlwmtiX&(W{C#qh2tipnQ)P z8Qa6k68{DV=NFNYLKk_Q6z=#*2UId8OK&U!Vc6l$k;`kQ#hkh^HpCaF)^VO3F$ggu zm{cN5G9pddIxcUsrdZ^Cu{JA(hQ4KYv-wNgR!4HFuyVbmH+k80t?nF#**>h0{e8#` zGEt%f9MVj>sCw7_!``uWyAh0h4=IoUX0h~|;PlQPc# zut224tM@(K2&^9|R>PTED3A4>Y)ZQWO(= z_kyBph7q|cH!v{RYa$EM(4yJQ0%^!JTYU+d01gOskupd%!=>kB(XxRMqH}7wg}dtu ziUWy5UN$m%we6-Jt@T=NaiHf_;QKR&XBZ-6#VA5D})lx#z>ugU)NlKX*I za+sRF3mc_S9-arx;XR6e>w}0}mk?#DN|0@?M;IS-8ezDv$1Y2q&CT!n&nD`A*6||p zY}~I{a!E=We24rJ2}(K(;OO*nHyuab{yZHT#G6r?^ntSLO&Vx(4Q|+9Owjic-m>d8 zNJY($f{NoQuie>&q8d*kd^G6Dn4JNgL4c+YR2@&0{R|?Go+!{8KKvydN4Mkena?wX z?;c0uRFW%M`Qz%fWc1cdoVv&xg``7oyw@#BpKSU1OuF8oehdjwAx~@o7x|CmyM_{T zmbAR=q}7ByjAaE+fjA3_XYT-!S1RVLcBge8!BXN6s}}f493aM^Or=dZLoZi><5d)r zn2(RejyJ{VR#H|sh;w*;azmHPf9QF6`J3`m>xhP*y~m@O*&F{h?4;#rGjlfl$aO3R z{VCSPoV%h;FOvRw8C|yBJC55QK5a%#W@K@nWG%PAQR^Y>s}d%v5(pv+gt*UxufGF8 zO4|Wru$_t37wB%1MN7efiyCfHvO1CLMv6YF+aRToq6{LIe4N*GSYzsYMA6ShuwU!w z5JAe}%FWoXO$j5O0tbg|pC?m^_I_IV0Ic#K-$aGPD)re$bknAvVQ;h=wBK@rN%jUv z75{i0{2I;bXV;v=6<_L5zS(Rdm1OhfvDW_;nR7*L{`XWy?~K17J74dtSjo^kfDlOQ z#~V_rtPK3hj4?62HbrhkR%)b?+t)Q1dBL}GfW=A>djl3Jb}*qatb}#40RqUbNZX%z zdem#8dDIGK)LB2*BN)Zbyc*vY^r7&r`g1RWyrth_SqEE6KQUBc-hFh4CTrjQx zqtkjxf9aonlzabKTCAFKmwtqL_~E6U4fn;GnDx-Ft}EBE14&o-;@p*iMM+Mq;8=0J zd0a+S$<7Gh_Vf{sSV?86U;XfyU7M7K#KANfg7ruO3=j9}d=fVJB3Q|PG$(2m*_@{! 
zV!Q=@oa2R4EM%#xwR6;YfFiokTi-A*0S5V!Y>)GUMLr)lGy>Mf4IW}cmfMI-XbzgdgxXtF8i%yYe)Y%!J)CPqn*Sr}}%kqx{Khgl12 zn-XSwoU$QjA&8vF*{F$jU%_pPY+>4jNhLet*UBHZ5B5KTi}B^zoMF+Vapz6RF(-hJ z3soPIMI5iKHe!Z4@BOwI)gVim9b(Mc$s5n9=N2zCeJ;;h(WY*sw!NO>jS%f|hbymM zhGe)Ap)4_Q6fDEb@3LFRF>L;R|4y)fne_JZ@@L8aQFl;+fvPI4WUW9z=hG1S_udX7 z>8qm!X=M?BNG^;E9I8QLy_AG0S$}Ghy|+OpQjuu^Qwc8n&(Dvot?X@_hPdBQ4+3`} z^tr|G0?vN(VsC8Nm_G35q5`3_TU`l!=_Te^$gc8~7b%7Ob1P~?eph-U&ZBJpf>&u3 znA_4wP9GM~c!#VDJ*PuW>?@z2qu-ItCKOU3Gcz--2Ldnh7Lny}%}amwzZ?t(E4#Aj z2vh=}qGV4ND#|J;&`qK~wK!=TKbZSc`lMMi(u>bMjEc@6S0I3BG^OmxqQRjTt9)A~_ zp;@Sc%AKmPDHVPt&*58A!Ma%2t8Hpv^n36*OE-U0^2#g{`n+31Zer|_&eW-k4{yV!D|T6I*WdD$;wh+;C1wvyp50Cam8{YYy!>V#`DQrh5joPe>#AgH zCKU)_60`GT#QCNe{p{H|QR~r%+8|>Mad}y#8yak!jXm)wKE&*z?5grybAh0d{jsh*rV&n$19&+hJl4GDC-Y8R*uZEuB!?uMDt<+lc{ZJNPjS ztl(x>VJf7s?NCB1kL{}Bjlt5aLL~Z4wg(_neMR?y2sFTI?Xa-x_)k6F*S3Wlb}(sd-(ZCAm2LyFc^HoB~B4>{JUh#F!+pov0`>l!6ffjl_wDm z!u;&5jZYB}CTJZ<2crV+sNPB|nyzIUBQeB`V{?cxj7OfW-2d}nHbr)+@Im+AkYsAR z`6lBCk7&LK@`8_ml)4V*bpnV}YzL4!%jQ|?0-8As7)~*1>IM$(y6dxy48v7TLD!WZ zM~W5EC`x({6WCbQjJ@q>Gd;b>fX5qf#4*;QOx@!z-iW)Drr|mji7rPy37vYPVi%O8THn1I|#`X9(!$2G168_uv-h6#9S!+I*`)nF0x zmmj|6S*iktgaCXv`>rx}C|h4qvQgn_M15LBeWaw}-Ie5AH>z=Vl^^@HTa0fL8+c^z zMG0uw(`)^E0!NkS-u8DX)xe&*!Wy?*hS`S=#H+_8&uYXJF0hNc#G6}DuLNDcEgH7_ zIJ4e9!KvW`+iE58gK2)!AdO9b4#c4tnE1g*w}gTnD_4!cg@x^SH%RL|4}_;S-M8Ar zccQLyK1%!|T-h1xircnJ1!sa1B3bw7HE+c6cwP0Hgpb$F>)b-`)me+@5+Plk#KB#} zcGVC*sk&d@&*@QmY6~^#&3P!1i(`S&qvK&4NIwWux~Q&3NL*Z83#OAV#9x9k`vnwT zYI1;9(|<5kzP|Jr3y{?tLuQf9vH!k1xi0;CBX=L@B<ilg+vWpD242-4yIrRxr$K0V+4{NKQJElv{4znqc69lEuZ%Yy5O{{p+Z1)7kR^|2qCVJUCWQ|C{gczBG7k z>!aC`ZeN5bkl$b6hoo@n28QqhS0C8P&pSIj@Afx{A(_pug^VnDN|gqs;u)7nkyi#- zlHgBVqBLli-1+U_QmS{`4z|;hDyod8+i-NV$eNJ81a7YlM(u=ccGwK|=2_tQ6z*@Y zz~s(5A^it{-mFNS;-7Tg7!8kFTaEBY4u(kj5H(JM1^~VPgsE#MCwZ+6UfWFmKWF?{ zIlbiTL{(LT1{fW;U}xOLUVc~YACQoquYqgv4W2%Z?t5?go{>yT0;EB1A?IpKpPIxp z96ch|N%#5NbH&S*+?e1gjMuKA36t0%WlO%x!HTcCu6HWg>Ore;Fgu1X1IAyaCyEMe zM-nXnKUDf=C@Z${ovz#&hwSA_*zTLgFpSt(xy=|#F!qMZncamNsut84bzSZD!g+y{h-Sstp0y@QuN8}0oaVk zb|c~_&Q3-}^|^Op!3)s{a`mC4DtfgkW~zyyLc*X=!(+x0T_x-cmSa>1tljYR2)|@G z^9#1d;3Er5QH7uXyS!2k=KyK^?C3Zwc{`!XQ*Z>Ps@;Ytf3hs?*~U!IslwOZzDy$< zsAKn9A8nABku=Gz@p1Vtjm#`1aQoIvce5P+UeM1>}h-oFBjWcausf?(LkM&SMag~&5R@|9e)Lx8UL8YK;``ZEqC~J9aVs zbbKQfMACMgw4$peNki!?wH1|6M^^3kM!*2ifjO zZyt;wmjhOHQc6@@L2?0(@Cf^JB#?1l4~G%V_dU?;6)M6%9+W~qS#6nClaQmJL}-ij z9PzXjfzTxBauAv{8ORomyv`Q2tcop-oJuMoQ}M=wg^^$yO<10`8mvBD{6CNO1FLkE z1!}7r?At1E{1D#Q|ClZgNGicJy38s@G;w8K>k7PCk4TY{DHa7p%CHa*uSSVz@08eD z&md8rT?VBdJ?@>$WHu(FRlhL>iM+hOpxmqrkkOCe44q+r_<*fE^z|pYiBYkG`L;)I zdjatQAQ1dt;IKdXc(73oJhCd?_nD#B_%^0FXkf0D9zzZ zn%^hy6AisXxPzM-U%#K0?8+GZZ|#a?h%eha3X@0ftV|;U1R%6ExlhPe4} zEbNKw0y@20aSN~(#p4P zW@R8WZ1QLo5W7+#H~||z04kpeCJ;WFJ>{_QYYN@ihuDkpaQut$`y*oe7YNZIdrJ=V;7YO%}iRyvJZANu{;=2rm zrIl2xWpMQSOcE7k#>So_6dS`o8vk!gbc3(cgE`?6x&3<-6IEi3>f6GD4@SWYNjg|M z%`}<+ZxJ9UMO&8Ra5~ZKPPS!F3)1ozyxTC1AxLKc4eZ4V>JR_g960&u^~ovGYnNe! 
zw#xJI6d3|8#gtbCQdO2=qSQl2v+V!?$u8k)izXT3E&JhzT=pIP`#(}h@v4U#5399* zMmho-fy+;r0}J;=^Bd;AdY_hWquXxxY(Ig?|7?Q_r-A8Ux$e$HYJK7nh;`# zQz-OZV)D+Pitgf6RIk9HRO4ZZcDx*_V&F0Fyv&nd?^9$y@a$(zsQAmJxG-4mWqTu3 zCZ+<>(==;!zaJ<|;1(jJRig>fIjoIQen05E#H+WxX|%W9#Lt4y1Ojg5{@xcPx87~9 zCd=O^mjs<;=PifPCcMIOW^$JQY5o3&o}Dqd>OKt{!jp8yhW*-H8RlYM02&aM{@vTQ z)WgXgs=lmX(!E7L*zDyW=hVE(O|}^Td;}_=umRR?|q|4t9^mNgw>Vee>1I53?coH2l?~ zV@tQt_@n;cuC^K}uRCcy)0=Om<7YR_n|=7_>YRv-a+g&4D%%Jk+0z_-{M)TN zjY(4uH&-^#h@-!}!|W$bU04Rx&o`#G)A7=Gww-VWr1vg3!tpH|Sl9UjyQZ|>2OtGe zLE*eU35I#gS~KSfB6&4F{<#3v{cKvYVqz*Y@}R^!_@Jbp@qm0{ia)7=dH&F}!G#=T#@X91nqfLW_wKY>{LHcKeH(PH0MRfRLtmU2BStWQFFiY#eH# z&98-xK^z~_>Y2p`<`$(8klZ5V0>lX_*D|%P3%d-OW@cM0#eWw?nl<_9Zr@n7U0=l< z6hvS62@>Q|Bf}Yz$0D9mRngQ*WX-q<(XJ}wyVAk?<%uHGS_oZRYvUK z=URugAT+U;*X!rXur4kCl;p_{p5PAGheo+(y3Ee|zqX~&3E>)yfs=qs4+}1`7PSTC z5!()jeffW}i-Ulzao;){iURHo{-*!u!oTOuYRPuGudM4hp$8I_D*vi;JH6zUX8#tajQgH zesJzb{f0r`>l3!b$Qf~RTqPBht6QkeliT^_o&{H4g0lxwI>v%|N3#J!R_(N;ND5qU zSkyWl@BE}L8L}1*dOcjUIpA4IRaNj_Qdgqu|6%K`!=miEzF`por9nCsL`qt^L{d7X zhY|r1hHhpAq`Q$8rBkJA=#=gpLUQP?8Qv4G=efV@e!ur04vsly&U5Xx_g=kzYtV7- zy2{$-T;yeu3de`rdH7}>z_=^drvA?7IDjb3RFOi+{=q!Hl4~nyofX~7{tZV)@j`FC z4`CBic9;23;_{gzg4-n<

z3I)_dag0vkfx6FA3UqU6-1IZA2bw~iFw!;0TKVPSD{i?1UVa<)C1|yLzm#0s|EE32_QATdOYui0W6o6K={50O z%H7N{MX~dl=H8@9VTSF3nnB0zmIfPRc>T-OjQVjzqQS|Lh5Hyfo}SPwX~HZ$Q{lU& zo;L|=tzG+&YxNqJ(#$t9V1@r5}+TrnxE7tl*{s zV`)!t)2w!;c&L3M=eNfQuhGmpj;&R3t;FvBt&^+4za-aMzM?glU!|j(smHJKTDSMQ zK{bqhsGqfyVnt6!HG<$W6lm{mHlU+c-O<5##BVVy*XdDDE(AGE@?1@>Q<+b>YWF1$ zQ6Ju5NIi0ZDv8s49D82CP`nlId%gL~qWkr(n|u88(es)xo3Z;7x(^QJJw07^=j&Ql z4Qo1lI}iwVEvU2CO(IC0;fH5{_XiEQYWZ+!920g@kb~v93>$?$*N2SND8DLeJGi&C zT(Z|VqN@JnM%rDgNrc8V;RjmZEK1Ynlk~fN^#(VFZFLE^7qr$R{sPE@V6*ge7&0>I z93jultU^2=3BLwSJBsPfH#MZZUp(!2tj%MIO)7ChNQ;7ERl6+a!cw( zm$TiGEu}31X$&DNxxJcdUqru=wwoJGodf6lmN&CuVk(0zW_Q=~v%;j7AjP)>vQEXe=WJ{eH18oh$p^h_p`5$T@kAiBR4O+Uyi|u zqv$<-unU%#JyRe2ok6{_6cl0oxq3+#c?P{bC)@~+XGRa`Na1;kkNHiFfS&9&E1mmk z#{zQR`D;kaI{mGlSiWzDfrO-^zrcd0mG!XZHwKE&6aIdfh>EgI36)?tB+X46Ra=(* zf>Pkc$<2D!&9f6R%uED1XUN8{+x^?uL;Id6&!T#Wc!`8yxzX48*{aS%+S@~VZP(pY z50v#(*-G7Uv2J*GZRYk;Nv5J^_y69{9(#p=VtKQpHaL zI(BWeO(M{dvxDRyMl@ed#AMIUm3-N-lgUFRN%$Yx%dpsD2(FLkT!p>*IG!y~WMV-`t?TK>ulMZWIXpd!Gg4!n0hc>of=U*cD`BDr-z*`)ojX5WdC8=D{X0xOFU}L zL6ciH8I`jv)Uzl??CizwL_{-J$5~oyIkJ9fXQ})e{={p2tpP4hD>9A}^EZL7G^2{= z$DG$gHaI`dv(Rm@O=)mkgCP2i8@a!K4}YS3&N6|Pll@9Yh}-u$=C-MQ+rp68H+B|x z0s9x2EIbuHm;L-%x68rk0iz?|_yd}cL>`JAqWVn3u1tMXu4Z1NLz6lE{PyG3IzF*< z!Hd`qJ+-5}cI}PoWxPQiPfunv+{CY}2fQ&6Q1hi%WC`^v+g*0t%EM1A{*d4LTPn_R=dQf15fl!SVsT=uSxnLw~eI(m`3 zh4FkMw=41k`_PnzP*vc?=XU%K78x6kN1yeIp1k!|;O7FDgP(*ll-n;1x1nFxGght`xkQ7UhGZdLjo0=k*(Sy+`nT7x90`hp zi5xk{17XR9V`uR)Sk{*p_L6j$5fX3nf(Vv_6vrAk!y=oVPqM& zqP}X|$j$Y4ce`|gpVs__qXb-NwAMg11T$BLE&ZZnKPQiyk_Ba$9$03WVm}S({8Uk* za?aj%)Qs6@vIZ5i_lRbAj==1AmoZxPr!55d?ol9OTrllOLsS?|T?X$d5qn z?RPkqB=WLv4}eG$n52Vbn!4BKjU0Cz@>LU%$*|8kXw=Q89F8~heThLE-2%L5haxKfo7H_$-ET@DyOTzMG!P4aJBC1}q`F45D4BLZe2ZBGnfx zW4~a9TGdI{hU)6|lW_ZDq69f49)ftHI)W9fym4I!ld1)YJIm1o4C{2(XDz!xadOe( zhWbUj9l5|d+LQQUiVr)z{aX?QIxNv|NDDZkECwtiQ~qqe;{~E-$?j5$9-iL5=#KY8 z%uTVkeyyww4^V|qI^x1kCej4+(iy%k4(mu2p0nSY7QbDY?kqLzyzk}cY*NYMVw5r3 z;I#B={F1xvY=(xUj%PY@}F{U=aZFaXytP< zVhMfBQP0wvs>{gaXTMuv9ZY{4EG4yN0BzV67()du%+TA?zP2_F`~J;INgY!rM;o*#9OdOHAEtPt!UzkbZ z9miMQzL$^xK7_cC`d-xO9<2`a?uNcN6?`nUr2uUh&{NM@+#cu88>sRhB0%Cz>G+Cw zl!{QtV>@W6&(?rkCV=hI;M!-=pd?AE+G{R;->yoyU0F*tfP2tLZc_Nr+n?7FY)_UD zisrd0F<<#+hfsF>kCR(xC#j}hV{gSQ6)%`u{CRIV&A85}uWzka$Vi}5p-^ht|8C3Py@O@-x znSIG)N+-#g_b28p1~u8ZAGStbzp$`>fNjt?qu^b|xx^w2I({$s)GXg>2)|TNy7yxL zHmXSSPMjxQrG;j@e1|Q(f5E~tP)4?cd&?pT!}RnVJs0$OUu9YZGQ8E|v+`P$d$$b+ zLgcakEFK(`&VA+gm6=7WwSNvlt&sDoth&d)l5V!}*`JQ~vfn*GO^c2?1KU@!Ka=5N zQGu7+uZlP*cG3wGC|o*(oDpPpH>h*zHkFJI5f4Ta3T(?ObDfr7JBn5ADN)vr+hW8% z$*lK`|B2dwwqXi==g|-(&bIDEyCc{Pxmym6T!5QD?E8ET(EideAe6xyI`;m1$cr8e zge!GyAH#@N{;o zwFG>HX8))!;r!@*&m$mKpw)ZF@Q2d&pD~59t{?RC-u;ZiDd0No7m~xSv60fLZR?D| z`}+6_vn_^Nti=&(;Q&3hy)z0erFu=JdeLbDu!LAKg~}xfr0pG`@!n!q|NHsV&U+iD z8)n?^f_7GgaTGCe6zP^IaR2#kz*D|=^h3Eo+@>d3Z`ZG>kuu^04REU3VMvZr;I&7s z72-hLC?M8t3DsqKQ3I>{vo_!#*XkdKC4|{Nhc<{9YB%k{&3dSao=?;61En-G=lzqQ z)ut+3i2seA+<>R>zFk{P`Kpg|-U~r92?7V)ASl)yk-&@Ri4)Lb^-O^tmPcuwEcxk2 z_kE?dB9h%d!D|nZj?mg!_XXJj$T9Wy(XXtLo_%N*nf1*Z%0sj%!YSe@k}0w&p~d|T z0PTnZszkd*X;h8nLxRaZKN%z|NSa8e`C!u0j{`|zJ8`{TQpfP+U8)*Ppl?3_BbuiQ8;O{A&`<-Hs_80J$fx8N4tA` z?0N~ITyj`lZeya9#dVUPT&jgR9(o^YL2*KRwHlPiuw>;}(S72L7q{P2r;ooFQhx#Q zJQTPCkm?_sr}=V%Uwig8A9k3r(opsD81BKBeC;EN>ySdVtneuL9cd+hP2MW-#ARa8 z%{V-U14D_l^pAg6E%lNW^%1RmAKDQg`5lfFKk~^m4e?NFFJQC~+uiKBV@yKU*v+~H zd=Q2!m!~GT^&94P?KLpkf`0gIQ|Z*M&w&&hr9i|aaskein|Xn6Za3cQ9=bmsS-tY| z9sN>;N7|Hw;~O`rH0Qq|0#2ixaFeSvptBXo%cXGV!M3nkqgFjQt=IRV^jD)%jmN5+ zo)2q^Y^OSo^Yhf)jYI5iYoOnn22v^MU|v@)1(|n#9N?^Q@K*p*Q}eXcuyU6iR-ql; 
zCQtWu2X@2sb;y6eZKt6P=+IYJ1`6|O{fufoG=|RrVu1C(Nn8m=ty4?sJNh&bGRVa$ ze*f`CM*S_k@eO$mm_Qtc7?BSo{I@8ye0(OWu zOBj)Dud`jh6)cHM4(apu(RfSdF)-Df$IkN9A=(Wgm$fs;yzP^1+0Q*V2}M~E^^Qvf zAKTqdl*=FwJlfrxr2$)BhbVFZwSP-TR`)ri*>ep$!{gTXu-=0leYJr*OnF7Lg9N36CqAabF*C zccuRk;1QyIEll8IV`JGUYq-C0xeM-eW@c6;iN@J$5*&W$b&mhAi;{11`wO27paEYY z)IV=8N715h&bG_UCG3T9-m@ z?-#`mPpX&RGtrbA)vAxvu3cRoSFQ@ei0&VaNe8!W|G4ayj6$TYG=$th4l;m#mpL_m z;=2Agoub5QXX!0Q-EvUwki>O_#MLI_)j>PKR>WwTl9E#4_wO<{99j^H3^C6*Rd%~G z0JX_Q_A<7O=@ff5bLa2XE7ZIhhQ~c&DYftUG1&bce}|AxxDpEhZ7WiM(1Aytbktw7 z*>nt!;SBsU4MeiCJQ zdpQJ@jYeBhr^WdAxSAGm}GySZU0=;5^8963j9xvD~03xb(mcq7QxyI<}-= znpe$t5#ObMSL!}cj2%sxBW$xz`($S{F_(( znvj^?=^p{rA7@_ARoi`QQWWh(a4R?+LZ#;7eLt?~W$FPr!7o1|^2-NIhB(y#Irx!+?7>n ztFOL3S;j$UGB`*UJ;AyS__@OyeFJR;nX~)}ELn8I40!`+3H^0-{=qgM6OL^sDX{yv z`uaO>^>jm>>^s9=Wsnku#%*eoHHR1MD_u&&rsf~LBYfLfJ<~7xeE!9D%UR)(v|MCV zx(?*z39)8b<=4l|sv)hrK>K_p7%st63j|%_of4~4zI=HzR|C!ma)D!4rwE(KQ950g z-H*L-OqCvo<~;X6Fo^i2yCr87*$kG@9(W$ES=h4(H}`Zd4gqwD!(6{8nI!YG=YGGv z+?igmCy(B%f*q`$VQ(6_wfG8TjztiY5$C88P;LcSAK)Fbw zq{W&m%mO?j_#@w9Fion{c^s=eeLJzaTy28VOy9Vfty($Eqem);HGiKsjd-vo->0I` zFJ9x6`^}&C*P_X{4rodWZrgZTT2rGq^%5ad>(Vh$&Jw=$t3R1*3?tXq+(FcJJ0Zoq zeaY=)Q$xh#AUotU2`Q2vACQ^BEE@d(fSLIv=XV{?G0wh5-SCBtdu6o73U`xfFU%7WXP&VKXWH;eyx;dIYu- zCP5T&!Zb#de^qqKI?7{;v#6|ycx8`AaeYheaokttdly`he*H8%L0g0Uk!K6XWRP1i~&B1-=9Y#)XMI{flxU*&F$=H-1hQ<-3aF zh)$=oBRB6)4Xy|>V=YLnsl)Z3c(qLcKgisnxTNHJSQz$?_lS$bRTr7dR@bE#G+@&; zucRQ5W{P=UUS4a&WND@2s+>WMJ#DE$RTu!`_8hb3@_1wH?(|1J;@ z>U6#{-*03SG#@r_`Lep;rh)H~6#E~gJ2Ad$6()RI#m3?%y*n2^wWw{cKp1fmp0VJD zu?9qbWMrg%tkRBX${4TKUIhl|vjp?|7}AR3Pt<&9y1t-wvSFh`PM z)`)wbeVzncT3H28g3TNZ8{9ZbwPz;)R@ea0JPabdwv#VX%T96u0Esb=H7o+mnpj5p zQQ$6gdR4HkbyK#WXF$XLioHE>q4v9d@Q=p20Srsx#>U9YW&fj-Q03V(_^sKs|7#gD z(dJZd$6MQS4aaD@*H2!XuB1#CJpKc&KUNjFTJ+Wy@Orrm*{iE?w!58z0;O%=7iNy4 zZx!V0U5G?~%8u`i$amHWAfHrBIuGG+g>-)idmqHoewAB_jgSnh6xZ_D; z5vMQ;GUXN5pl&w!N7|S(3HKRC@3BTZ`oMvL{oB~*54+tYkCj8R;1yp9dkL$yZFfu& z&|6U*5^!YZ(d6R<%dW-w#3&C><~?KvCprx#@y>IdDR5yLS_MR@f$|=rW_;kD9xT@$ zQ+*0-rw(B$MAUHCav0UdtPf_eW7gP=<&AuQ6K|b9+AMLggigg{{s2$os{d8)f`tLT zZ4YAec}#6JGF#qR!}ldt$YEFQW4g_7 z2t!T+Sz|RjAggT9X&9LFV3w8tA4LnepH2<8-`^{NYYL(lAZmc(t2aO`dv54s=CD?~ z$|H-97YEDc4&El0hZDM&hrHIqPXJ&DruMS&9x2XD{{jLDf;{5Vbo8;P-MJd_toD*8 z#3!RlT%oqHd$MO-00o!P;K zlDNDf^QPHKE-}}GfQeGWI`#0PPq+Cs^m-27vBAwo(X(H>iqGTmB@w8l3wJh8_%r@q9Dq>4hk$>(M+O9; z0R!~hpZB41leu`-lsWr^pY6w&0~y!~!Dy<+LxC)z+bw>ZeyL(J)_nIS!TK}*+3&Wc zAe$NhfFQILKi~rW1O|-)A-5R*+?K$+8na(1E z5|ectZG;5(lishOjZ^_UHldWbMgV3|S|eF>zfmh)UbMyRai`?%vE*1Z292 z&cOqw|2YSi7)+lAo=!VPycFn(qyQf|%i%fTIdb46+J@Ou*P$^_#Pp%3yAd^uv;a4V zdpC#$Ew6-o_r2@g3MlO`TPmqh-7eIa5+%pLB489t+^?RP2iUxl;fVPo99!_{!P->w z8NU~RfVxQly$GNV_Qsf@Ety+5`V;Qd4;vK9xZ!F zvB}+I_EemKNiGfTo{Td0Co}F=11=egaekAKBGZ`m6B!ie}}=)MpGV zM0l6|CaBkv*E2(#)bNX-lqhKptXcuP+0PfT=tjt^(@qPqa*pkL=Pl?2A+X2(WO-Y9 ze-#2#IZW!66V$l|unYi$G2{G5^)vnSfkih$c;NzdH0zd@^#R@JPfJiM`j`=gawR%V z-op5w-iz?=kZjX?ZK+(iuTyxn1WA@$?`dW(@#N4`Q1GyrYoX=5NXcx^4staPNjJ?% z7HKhSK_@S773QuAWaXA|svL803j@F@%5OWlAWw6sFMb1IF*zx92CQ01QKM5S>;c-*&O0C6t_&n=B*C}?E1H!dW9KJZ8d+6zDxDuR8`LjPd zu}fiX1Jl$mnpU+Z@nLNn`Mek!<6IjR_BX@P76?8PM)5&4|0vUz)#tfN|;{@KK^=m z=={xW^CX^#FqjGe&nA|>aiaE#6v7Rane(yO-=P9QA`4!w%^e>_4PA?l0=Ucj``T1X z5txF{bmg?4Ox_EY!a+x${P^_9uN4hZ$?af<1O#$MN5WgchLDBS)d0zxdY7%&#tm+y zg^Is1g>ZvH2A}xxG4O@-nEtw#hxwQ&VYUbJ(^a+}<&rwNjNZY#@T_7SV^>frkOZ%K zJrZ+=fo^g97yaBSwmeLcuQ~HH2MzYSS;r#s8@{(EH7RhNmVy#!W?S*JKp+AK4=x$g zbgHETkcY{=5j+`>asP!%54tQ34hQyD=WV(BBqg@eNVvx;UhFCXMuUB;w73TM@G_6v zpFG3=Tt(V7;1qEHr|6=oF`NcC1An@hSTaaL3Rzf6u{%64ESn#}fspr#e_T@|&;{A@ 
[base85-encoded binary patch data for the preceding image; original line breaks lost]

literal 0
HcmV?d00001

diff --git a/images/Translator-h.png b/images/Translator-h.png
new file mode 100644
index 0000000000000000000000000000000000000000..9b6daa1712598b8703f51685cd6c9be575703fb1
GIT binary patch
literal 114830

[base85-encoded binary patch data for images/Translator-h.png; original line breaks lost, data continues past the end of this section]
z-YWkvPMn^2V&(VYzitys^WJADPw}K9;QVMcjd&c<4u))28(ThGVY>aX7ReBM4rBOW z-^IaL6Y1H5Xp~v5`q}&Q+SGxvE$Ex#YXE8^)?00X1zmG7c)DzFpAuZ0%-b=w_+fl* ztp--^9s7KqH6#}*EsVv6m!L-=9Q zU+`+)_>a>NC}E)%_ktjyWKR1+e)9vC&v$d_N1w%FJ9 zq~P$&jeEI{5*1nlAGAE^FE{^nudw2={JJ}VfPxY;&e3!~fw#Z!U;XQzj#0H`2dw8d zm!JSo{t5pcCWz$gr{5m0|MPXq?2UNTSl=JP@=wg~gd2LWS6r}JOrTmA5SdXG21_<8 zF-)vEdS?)zf;QjA$oWg=A!n00c21Paa{|twK)b9K&lwW@jet&$Bp1UBoXbjfydtHu z@3sGD7}7*(%ca+Pbig#)U$1Hv1u~fUvQF}s=hS-L!tmMbZzZ}BcMQe%^)Iix35fqx zGZWL&^Lzspg!He2DPZ!D*;1~a>nWMI;MSPhc7g<70W{5b?PYGf9FE(%zta@GG|f;$ zY^7wj0}`wb;+MZGzG1ZdHyGwCOIFZQd5t2$Mzd2Y5f#!vdVov{wEq5OYP}p|@7jo; zQ3iPmqAOuiTtV7;%_}IR9`Shp^&1@wlt1$dEW$(=u&X7Q8I(hIjfAzSf^!n(6x^YL z5dP;zE7>RJx_B_8>d+j66s>+=*$Be8Kcc?+@*C*KwR_fLD^EdtG*2+p7ja8EnXCjl zGcWiT-63?oF5&x~=~nBio&}FayzUhQ_>%F4;&Z}yLfn*S>Om&=hmnJdjk_RQ@E$G&MArZoAJMHgt_q~v5@|j6kg;On2#^L^q^?Sy*q}c+^E>BQ-`F;5TfXNjmJwz){i5mcFm02Ts(g!z$Q*br7 zX9MZ6pacqZ1ARhw%$> z0Sx4{a)LS1X?#b5NZ`%e3^jQr{Hj$Lts;c9GnAG#G;9_l;WBY#Wh?Z1MD=p-;b=%5 zeS8_pJy4%UpmBfUkGJqWV4hG6eY$h~Fz{CS90_rMGr@luwKUx!zQhMEL0VRVjmpq6 zK49W4Bk1qk^i|EvM=wj4z1We?xlte1>unEt(nF89Q7Fz{)uj~hb7nYE@rTcIzz$(# zC#8m19zk$0b%&cBD+eiMpgR8v0h?1SwCMPQxLkC2S zpnEc5LOb5&Mk(j}Z2%$>A1wE{`JrvOvzztd2?GFI5V2lDlzvz%q1KOw{_ha_m*+B` zQhC!mvS5oBTg}f=iY|R^y6-M?wf1&{G~!dwtjQJuFr}904nw7I<@1&|`byWta<|@i z?g51gpA7(g0C}df-7|uAHy!R*Ktix;a=E)FIOg?sSB0Mmf}f??uOcC!q|{VFlP6v2 zq^pV_n?HqX?^j0^a%G6j=Pg<&F?0yyv`Lg$Btyb~cuBRXYPC-IDez{s|Jm&v{d2q{ z@>^%V&67G1hF`fE$tPd)=w51NN}M`CD-8-JYVy+Ynhf5X%tQMJ`FQI2gqxUuYLJCn zPPbTyK53qpD{3G(+UeP)f{d52cw5cPxl7rI>;#n5Y z?t38bs9l+s!&y-AKxCPIMrqVhk48@JLg?$qkes|Tsx@ilB6@`#KjFaezPh>QM9R&h zyp+Kbb&1RSjWh0hhVe#!VH9i0Ei6&Pub(_*E=7gu_{r=YCr2Q{`me|Pc@=R#Btoa@ z2BV+C4dis^jS_oZk!OT}N-XLatdidBX4-toc?`&;;_?O;STNrL8>pDXYrvI&EHDJ8 zH0nQY*128WO)2f)-D=xk%9e2DiL;IRY)9qxa>$NH7%c3~LYRI&dXx=dYx$Grg0LCo==}TFMOZly=736mL`MCaliM*~6Tr z7SdO$6FizlA?Arkm$~sM8m!Bd$B#AY2VAe*m7qQ1brpeJZHFEN~aHl z0(zR)UN+qub^fgd12WCvb6t)oNVbsAldu}Hbv8{fyW(?Joub{&88;WX(Fb*Iuons2 z9#>*hWD3QAI=!y>E6Zo6x4hSFGz08?*UXns#!gK$y&{2=sW$vM>gA1+E%=T<+20hj z#*xKi7#^o{B+?&i(Iswskt+<>d7X+ufBBt{B#R(uyW!)7eEd~&0iD3Je6DWMm2gph zT3{>4!D>%Yp}fkG(3{%g#vT%O+16k8XoR4UmI*mFm^RI^3ZEcMx*{ze4+vq$B+6OkoY+t zK^Ra&=Yzv3Xb7(#t$7nDj~++7hk(+DfJrt6fHC2sch}ergA^Ld( z6WMys6LlIwA_gGt^-s{tTyoY}2UL+qof6@BJA(bw%c1GK7R8=z)Wdw)>O~4Z?)OAF z`A*g|_WP{%iL#Lr8bb&2cb@%b3Dx21;&i^tO4DNxUoX3uc)>n0Z=-HJ;)q7~E1JV0 z63TsT+KKd(c|!o@36JTSvF!N=3V@=B^|C)oihR&6)Q&K|>3q#7u;}j#5gm+pkhq3$ zJtJsz45*m=;b;kNi!kt6P&0RQk;`(?Z&x$>9FjUwWH!fi31O>Bp#9apu#gZ9wh_%Y zHal3kAjiAQxHH&ThRdprEAzSTZ|-IP)mrKTv%zz(lvuQka{U%J?uWxy5-dM|{?Qx+=K7&Z9 zRYYBYaooVioTV!WrN;7un+dUR{Y;f7p9+AfzdcF;rjD8VC;j7Jy!odv5T=T52;HL}AWi%T;xlV}v6tCdmlu z9)sK3xbw`n92dTiGV+6Sesr8(;5ZUw^-R6nIjB!Sq6qP(!*BhKWOqDFg7UTxe`V3) zv(ARr6W>Z1+1;tq<-%fH40gck^$;x_34dL?Qi$qsbOHe^ldChL%wl6a@YU~u4xtnq zJuuU_Z$BUn?sw;Z;7&9h;cudX9b;(APKp3I;D-ESm1CpV?V@Gf6^ReFc$aohsRD3j zR(TE0t&8HU5<~*?w)iQB>eH*f7+Em58~*&+k_HzV3_8)d8A8l*NiJQvdgJv-N_hl! 
zJ26ML8(TX`#dSQ`ludYr&mv_}^x9M~@aLz!qng}((}Uy*FOJ6K<%`RbFWubNo+&Jp z9^g-%nY=wj=)Qgft^>#(B6 zF`zj$%ygyJ6TGg-_@l$+>wOC&;?7Z+e7{zmS-#ESx{U}J3xIhxhU-uiLw_A~e``#_ zcUL<={I+De3RcB@@l*IUO)2Id@j)ZB!FpeNUk_f|v{~8l5`PRA``2<#)|oeH$YoCo zN9XgL~1p?*7ITiHD#zIlMRR5oF&;@dm|?+ z-&MdNLG#1SR{S}SMe_%Nwqxin9_0P#dx3x?nooByfitEmgGUVU)WGTn#rUpj+ z?Sh=1Tjv*Xu2#X5!o4;CSXP=2(IpF=sB{Co-~bZrC=m$tqb*IN^rw z1#7ObfP|M&%C0;RyXcP3ydF8i2}R?0nw29nWo4^ZoZIEAG%BB7P78LsL3|d88U3>4 z0tWI<@T+VaakhDIWh|2n+V=EQrFpW7mf|=_TEz1=x=0K$0<}M{M-CosGJfbFkShX+ zVfU!A%wSQ3!2~%nyc0f8mh(v5k;l=W%!~Fekxk7XbpuCSlvP&KjA-)fB&=_YEh%_< zCvHSw8RbC<>-*KhS?q~O{x>V{=Y~+jkqhJm3X$cG`Xtb|z|=QBI!KUJlK1wq8tIA7 zWr=(<3}#VraZXy1WgdXM{*x5cz@#7HZFu37*gk*nCSeLB>J9hhd>`IE=yj|~cQPZz z`HXE8cH4n)1b3hD!pp(S>k1fh(vHPa2O`EyGnr6NEGrTvIRIxQGJ3 zd%RYhp)uUDsGD?&1UQ5LOq40Rn^EOrPZjPaZ|uO}w`1SXifsbzCI)}B9T!+t2P<}k zQ&bIn)z?3!cLoB0bHx#6_vs23hwVt3&z-#~)_ToeJ})-zdZk#Isy?+;W+Dp-Ul4Yb z8I|`)BT0M#Zf&K@Pwy2Q%xawz$Na?wlEaj?zQ!*a4U|)-xMWvlK=-l(GtP9V>b%Bh zkHhi#42?-CSi5JGYDc4<&gnh34q3-rsLmdPTL!ZJ{N_!=U^zl`Cn_3`NDO55J!$3X zv_kcFbF7i+n`c9>a$q?ouiD+9^ULGQVILh`(!qRjTc1`9W76^Ue>oPVu*z{qsl`Dn zG@y+{oGz7wa`v(51`T@WAdz%F7O=~=o&sxhk zXli?YPVtsE`cileSJ)L^)(M-CER|G;?`cW6#*ThmZ^GhE>i#^2o^8a)NQgS~ZZLmo z5u`VLV*&!@DfAu^@Q-44C|PID(;%!qREL?CI+hMM4a6GR^C2G7RT|e>8YA}zF>r>c zRMuG!zPjk-*mZMYpo1now8AL+*(74AZs^&sUxz)a(DZlb^DS6aWiFxlMQBczHA9TJCE%$rAqWPc_q!{jT;7r z$hK-=|8ipsX?xy)q-!-wR|nFWnj%tqL7TLM1o|zZq<9vu0VJ|@#Rm2H+=EPt?ep>H z_DNIo^W>^?Ugk_^mdUuFKa#r-8k(f?BU5O17bM*h&-l2wF@egHj<%L2fI78ACyJVUI<@qc0XA&d9}D_L2K zYh9?s`Q*2^=j3@{Rh`@6S#ee`|5B6o`De7UbozyQ=8?+Z_s1-<>@*Ux!wVk~c5PAh zJ3r(=U|X6aWE66&Crs`xv@XGljta=x9@nosOWc~xDh{HPO13Zn1Y{qHn(Hxq0vZ4} z1sH(vGM3b551(U#-%8PEIq$SQ3SKLbN6Y2_GW-4-HThg_d%^Y#8Q;}WZ5KVAZO+}i@Smn&%l@LQ>A}QO zTjK0{`MFrFd3x9}x|QFYN!AfcHFf+9Y8#v@*}w3B1rYFpUZ5WC4wvU7PYNxDJ;|D@ z*D>IY=7rPf0J#KFMp@kud&Vw%O7&Q}V}6O+6Weip;tsm@v%UmdveLJVjQ@a1n;(Vp z(Wh*rSf_(`-B1;pX^)Q`Y$8^!`|%DaVLNGpzip0B`l4t{{fsWc)_R{=u0S9D%v2}1 z_xe3HuzvyvkOloRB(g=)V+iRKr(HPICWJEqb(B)Ei*W0tHD2hm9mlE}4`hN$bE!b9 ze|CMeQyRWaLb{HXgAOxT5rz@*-^Xz9Od@i*Q$W7W(34k}k(bmTdr0z&I})W^(ES zL#z{G&YZAdsd0QtE8lM@|6*@_SS!B^&xlxM#^;{E+QWig{vGDPpnrM)FQ(+H-H4N{ zJ~Sxf^3tYwif>yiyQ;sF$S6Q|WL21|2Fdi?g@wJDW< z2ZRQ61EwboZpbRJ@MKWJ$+<8sbvRpVs-CyvcQVszgdTcDd8>9PwjH9*=Y$ux@p4 z)Jp3QIZ}%2_cLr-<8Flx6zmnhsS|PUK+Vl2bTLo^a~3)8CmFW)4eQjNx#` zs`1=Pb&7I-m22KUE(1sI>?B9@FLVG*?puJdlZ4M$u8c1Kpe+Z474X_d`udVv_G@!h zvpi+CRoN-~EW_ZDeqiZsT4IYd$69Z)*kdP9>iqcaboX@Om$ySsKC#Okz0ek@yj@~f)4yGriIyoPv0K#Vefmr?csZ5*V&AEW)vk+0SeA^qR4^Iw$cj*au zE?5Y_bk+X#fAE)n(8>dmS2$sTr=%P5#6-En?Du%`<)RMz+{Wdi6}7b5@tRlnlC-_` zLq7&#rxm)wiSG;y4fgrH@UdLzv#dQ@-=N)6tCxCk917Q6$>!{IchWqE5{o-_AxKJw z0IV(K%|i|Umb{+Ygfg#b*w|Fbr}_~^-Y4H)V=Z&9eTUWu{3T zP=jZ@U>2XWizV&J2=rdVwc>1b@47l&sZ~Ev@(3u*Dh?38w)$O+Eww|;YmVs>Y`Esx zJ(6#5Vf{2OpMwDM(?L!8W&_T5&n%P*f&y5zNCo9xmX{zB0!`6(Ne_t?yl8p=LVG%NHnAJJe(ugUO>qi=Z=Bz}leYyFpaE+Esq*O6oc{ z*UR^}Iv}#?VOp!*i6=HvSm>QJ+eEyM?hk_@y-^ie=t$!mY2F_{8W`Mt=@6vWDSG08 z;5tIkA9=4?BmwXT;rr}t3}Ai&6Cnj1W1VtyjgGt0t$Y^cyl@Gq9FR_Tl>*$LU=IjI z>Vr)sIOdx-Y22??gG`f~RSSh=k5wq|E@|Lfydu9H>;42#xiBk$M=g@+w6NXpa(!p? 
zOCPLWRAuguI!{Z{P1M{7ZI#MItb;q(9G{zfl7wly1A<-#6TenUNr-_2`iFf$1N{`^ zLj4gAG3ZSn2ZFxeL9v2H)EvYd5O+gMeZe&nzZa6vp3NFt?Jf`3{E$HEIF>I>$B!5jOGO_u_F-jPkI^63(YPD5LvI`wA$Dh-qYF{8eJ9=P`35 zGv$W>Xy2Z1FaqG8& z>xIUhv_{W*rlP%S!}DvEX_TwT4cjCjTXH{5XB-$4RN8BjKsRp;#P|M2IiVQ)aW z8~cT9Zjy0>4Pw^q%C6q@b#Wefx<9sAIT~8c+j8^tG6OWo&_l_@`~Z$h%u`VGh-Abm z3lC2Cj$4E73K2W?YwDPaOvM*!90gk|)=ovT_$dn)GCBYcX(BC38F?La$yVwP^=H(L z;KkB2()0N!>esbNwq6}-JmfV1HZQvNKy}kD+V$NQ1{2vSK;fx6OY*)tbvY0wrfN0< ztVm#eBSd&%U^y81LyC7`IGzB&T#Yrl@t~R8Gf7eh_1m1({h9`DL;ElQF_*y!w;(oE zm})!Z(N_1c(dab3nmPalj6ifrj-PBdK-JguYPlNupK+Io+8}8SrV4ZL2&bbpf~h)- zs3T!1d2CbW3v$ZXQVA7_RVx#g~*XaS%*3uuZ{ZV(*Z*ks4!5BgHjYJNWCQigoWL!bo@*FJWS&1q~Q-Y?%OPOaN=_ z)>kE_hQ7!XmXz<#sT}gYOGJcn+U*d`?TN!rBYT{XKuy#dcKN&6%!LbJ?e0Y>L{u-T z75{m!s$658HR!m2ZmlsYpwRaxz1nHuDkHtlX2hx8Wte%=dF(o6gg#cKO7d#)898I2 zHl6($v}^TdL9y7eOnN3zEorMu$FLe%Iwk-9aiD0?ljv}6K@{GBR!hNXOcZyA;+5}1 z6#noT@!OR(HJpGiAI2dzRtGYQjzzZW%=W7F>oY^drEoOD=du15%`>~#nSgeZl}i9e?M1GC=`&VR=JcYTdT``b!y&+`vV zhJwnTRa8LD!}(F+Zi8dVQ5X9^ZdecUxuW0?E_yO0g+GTQf4f)~*WFOA#8d2m$9B>; zv8P44)QxT#(}0?%l&{fl8`Cd}Oy6;IH=Ws%)kdWC*4ybrQ9{MWp<^EONcveg2@}WS zHx?DSc^{@fe_0*%33noFy}Ec}i}CRD5Cfq!h>bo%!bn2xt(TpO26r5~ezhEqISHgz zd~Qt0!MqVb1(*V;A+B=q02jC^B2A8GCju*~{sELcplT%yFm%XO1X!DGd;pG?$Tynl z((3?P57n1&FaTByT#alg^JM|?N}Q`wMNs*Cj+ka(slE5Q&p^L{C zd{Y3h-6CQk zX86FqkgOPM)X8LC50O(zb(XunL!qF{_mXPO_t-%R5$LPU)hd$heWhHfU_!09s6j@z zDLwBpdK5=(k56?fQ(B5=W*KjN74fhO5RjBrSsT{}xj2DZc{yrw#|49uBca4@>Y_c1 z7(v>ysq46$chWji76k*wU+3B^;dP!|2-xmcSz=f^#P7fxqC;hIKxV;w={y)rd@OSB z(O1OJXT=MmWL}%+*rTtz`{B9la;VcU>_$5n94@^Bk0#u2c|BZ}(#58@E(-2^$>lg6 z9!{<7_#k{={oqJTI(ZK{T?gF!Zrt&4ucWQ)4OQA>fUc!iYW(q@dSKGlo?vyeZlws| z$rf|S%H(auaZ?BR-F;7%066|;%7DEK@P9Uiw9(AxY^u~bSD;|u5bE;CwtK6IKtnQ) zA|kpOBkiZs6lxQBCqJ!ByjA-SFkUJ08F_v^@G!9@F=yKSFh7s#8bTaU;-cO-Tmxd{ zwA5KIfpLdswUD=wxO~a)%@wPUdiC>zzxfuVDcy_poP!r;L!ppYc+X2G$p^a}8ZNVZk4y3#Z#1C3NMX{6N%Xb3*^-YWz|kfD zcYo&VVPV|mp78_WssdJ9S8O<3nCbWPSbVyBuTR)A5y?L47Dnw{gpQfe+6Y@ekCvrU=8I4QPrZ|}QCfDVN790WUD9{}kHG`KkP++EG`3fpGHW3hPHcLm z!VgQZ8X7E4>aVjZIAra`dQmb{)y(}QkDn@5Pb3i<_No`8t8Y8b1m0egvsOo6Iy#i7 z%cY+;R(b-A#hBHb<;F+qmC1fEJlhdhX3K$#dL+*;3_HhA%Gr*IV zGlBHGcY@#-O<=FK5h0t5Sn|Z%O$WHh?g#4I$(lRO2cQ{stHhr zXF&#EG!5?JDimCS2Y>;HWqryzS?$V2b~u$I8?L~{VV~=r^h3GH-s{fec!$()ZNjdM zWhv8v*7ln?vL$1|4ALiA;!%41TeIkQsZu zu)aD21YS6In^ea~zJw^iUl5dk0UE%vw)y{X)__x?vWZ#x1iE5F%d}QY0JB8Kt!ZwB z*KWIx<3%HtpQzTa1aQ>Gu>L`;mV;~-)P$*CuG@wE7wPzaCt5Iw_6xbvJO2JIBBZk8 zg)Nt#ZxXs8MNw-O2}rJhHhvo*euMTQz;yl>daDi;BjqxEokx;2?}2~pSnEv93`L+m z+HV*r*TFDzU8Vwi6a{psY=nQvTHxtU(yRZbYwfL9*^2^_Iirhj2qM8FDyg#L-g z1k8F89j=BZ3urD<#W+j$N%XwjP6|{T8D*Pa+Sr1gZ@!KEstw>oAEHVh~^Lli;^>+c_|I+~L zK?P%=C&P!Iofi2@!16Z_G9aEyaLj$0e9i%Uu^R$dsIeHU@Q^E;=rXzFv}gP)=%bg( z?FYvw@E@_#8r1(&tdwE;_L-@w@(K2D zpz?!rDxe4&4I%$O00!m(&@(x)M%2YiIM>InwzqIEUNrcbWv?30Obm7?FXpvexM{t3 zmyPtj_eqiRvn+FT=#x`5qE%w_l>z;i+qAh1K-a<}5Ca$$XMlF9#b>jmAfEWq8>o64;}o;L&I4dV zUis~GM!+4$9>at4M$w2LGkZe4I2+tzT;61X2(bThV_$qG`ona7S+szY(#aqobx=m` zBkKX%XLDOkfX6@By;0VN_(K8VNh+y%^{U8+53`UCG&GoL7 zeWcO4xAZ#pUD(+LPQlmqUe6q^<(RbB4zs;KAPd%UrRSk~i}^s)Vn@7M2l=+)lsCeY!QwN&zFaOS z+t6B7`}K{}ne=*m$#wkjC|tnBTupqZ!~HZ*cft#-{4breM)E4m$vOa=MulDNo&^v{ z9(C%l9vj_XkH(E&lCG8m>LVdWZp254qqF}b@QC~=VpUks73Q~@esG28lH)CFUlF#f z;Ns_bUG!W7q-H?pop?fE#zokQO7y7dm^T{HPzcxElsEBoNws?A*4^~J>PKJ^9T)Cq zrifnH1MGJUapcTfxy1YJx7i@FPb^ljUKP%DG0i5|=2v3WQ>JNGcntQQCu84--Y(0~ zWCh%)-x(-xl=AyXRw>?QD(TE!H4Ab?THy%s<$Tl~u+BA7$REfA6UcUa7&Lwa146Rj zL`+bd7ND~-xyu^juEix`nLm!0j_1VlM{<8Eu*bnP!uQh{38&rfcZ8(hPnsF>wisQ`CudLMX|~QEH!UQ z1qK_W_*R&r!eh748ob&3C-tat};IECeosee&ZQebj 
z=&bjtOg`$o`$nvaDL7nLc6{)30k%GYx4ZIbuU00?`N^sA79x)m`sY60yQE^Q z11XWeg@*M;@V%#t6*9Ku9_YsqFUG()CGE{BB3lu3m*NW`tp{8>2KNskFb;wstK{h< z058vp;J?CnN66!IRWdL$Wx1e6+oT9qu{rfRJ#7gM&o{!DSGfN%S)?zNB|ShvXWJn} z_u}36LVR{EtVK}2#c6wUd&uQ4(4fE$6asF2L|q($nZ=BXnt8O1{Klj8O{V* zEuVZxYR@x9D{w=cbEFN}aW9pH?{U;VRq4gX^Me~xzXgI83k|n54$JDNu7jigMV3>d zf7~u?4E2r6$NA`qv@4P|{7xNX*ybE9gtS+ESgP;}QdDVRx2)oV7d0 zZsC;&Pm3}4lkY2HJ(@)4z3dSAVcp!846q_-Mb=G^q1M4O{qQVKLO5$Q_EXzpttNKz z>;unu5REXq$SJ|!bus%RXle6CB`Ggx39-q|6PCBtF4dbzS5NhwJv{{*AHLE*-an+N z_rJXVuc&~jk}I40Bo-(wtSTeB#cMNUXfI!)m;+68FGQQPH{a= z{gnHo!N=2kVl}V|LsNrG?G1Q!5fv0Ceje3^Dx}Ui_PbNvI&=1Oz1%T+bqr#9rT8W0_k+iz$-bs0Pi?U4(UXzV?pfPG;W%toXgZo`PoS_|s1}7w*l+O^ zNhC^kYUu~Q*HD{cR!^hLI=&oEwRTX?GZEBu^Vnspis@_2j{a7h2EUN-5tBRF$ydI- z>4|>FZfMzn7<50jj#$ub7B2N8Fa9p+Iw0RH?T@MdqYvnMjl9v_I(6}UJN?@6wYMiG zEPo?W!~35{IWaLv?xCO3oW)aY&bS$kv#)3lHeanOGRhB)-Z*c|jBTgd+D|#ZE1IKy zWG0G)Z9*O&@nOT*eL_1ic3K^P&WM^$f)Epke|Igjjc*O|1JzIVw8X?w$+&C+YG6`8g_Z6j@~x4okKO_$tt%TB zelncDoY}8R(<6ClyyfnDQ=^Lko>R&aY zMa4+tHXWj~JX>n_lE}h+_v~IS#H2YCVw%Dazfj*#OWdoYD?B*~q{&Z2Kb!HPbI?tX zB$a0)VeZzc$n$*b-igBdAc;Pd(u}whmdx}z8Klj_P=*Kd6kWSPr-3|s33Zc0^UIr$ z(V3CaC#TM-%V4{ToRn0b_!~3%D?iBy?n0`$&O(o5?w=`ldD?%xyr7EN4^rboV1dYV zUjC5dy?f^PjOH~_UcR)8iFKs!_R>_m4&D8CWhGVdL8uhMq#C@w5!;-gsJ!abDtBJb z5)P%wjc9sI(Po98KuXn`{rT9nVcWbr^8x9ZitQ7W#E?w$VeZ#XnowCSFoz3>j+AL< zsb%0z^@+%kn4yu+_${ve0>wnQqSfWwxmRApSCPllFzn2vOY|_$$&Ut!zG6{wzPh!w zxu&d+ulOFJcR8bH;zB~+hY(``WspTHh!RVTFHb9(X1O6^QnSrYCney zam84Gp2g{Oai~!e{gCb}k)}wv0{2F61PX0!{!0K^7Ojr1DB^&pPgPlpA61ml=<69l zC|oEhO}h9AOA4PKy*#>WYS3_=eFsv)!id6S802l!D5*_iY+EZ2JJ%sCUUVweZfIlc zGOW8@e!Za~A>XN1i!$u})y$5Bg@rF#z7l^^JT046mI3~IIo$jJ6!4#e85f2$xOC1;6RqK`f2l*q3v1Oy&TZaPpbLq-aacKQ|SY zBXS;oY3A~i+0u}&H7)QHOFN{4z9DDGZ_xfd$B-FO7!~dB5!26z`uX3F$vXw!{_`nX z3gUmAJZeOo^O!_a5wH|OIW#CGJepSA3SB4j*q|Yl`1bEUDDmsC@I|_ z-QBflghh9UG>h)$Onkp@#~J&Kv)4Zc1LiZ=eB!Em-dp0LLcrhsR7?JI8})SS9wz+( zfqW3@Wk};l^1z$Qq$EV+3_aJr|M&n)K1lz+8ETvf^Z518B=m~yvQletpiwbWlL+>q=Lrpjc0gMr=l#Gsho|-q`C=kvqS(-9D6Ae@Pq_bNoQHrZk3R zYPvnTS+~Gq_7>GvuW;K6w18W;c)3B4^!MRpAR>z@Brp&>^OvV_r)>>dD;bizJU$Yj zS|Rr!BT!uHIMd|n8`f(N-M4!3yTb#;S7aM|JfugXP$ z_sOJOBbdkJ%p{76_6WkXefeWZymlR>RcwQrtr8#X<~(-up^RRa9N(3sINutne|@bP zr1EiX_KE3Ne`v2Alh&ujeu^>{Q&ey4Yn$Mre%K%JLbDZ5{!L9{`4DOJ%?SAda*j{Z zH4*A3q<>K54)p>a#AJR62h1XuV>urKuHkB^)lO6i!M=v9yM~r#cjr72Y*=EDZy65W z8>5Ms1WKSywpwR&sJ1%$mIoS#%PkvcQy#m?BK(Spm2oqQ?sZHqWg1T_iW!=<*rxSx zJ+WxV%nAZ=?}I|V8oliNjoB~3Si9338=Z;IoZTaZkPpjF2kFZSJq0XY&?#NazGP2i z=!*43E4ApI_cJB8eaix#+fW#o2Q$Z*{eq$+rI}Ccn1AZZX!!b711MhB3NTGgOmBP0 z05RD1($xHXC5h0wK&SwX9otR&K=-!AqjfjL^>l^lS-R|#Cnp;#Xa-66Cg+~>yxRN2 zj0@^6r@dn^`0En{j&QzO@_MdL!QSmo&CiQ8LZc!SqYepmr)xkWNBAW-aX-_9(#ZTh8w#dp+y(xO^|_y1APM&Wnso03 z=JvbLtb)!%@D93%CFsaL-Q|(Q%Ck=YFsF!gRILmum#Sl+W12?0K{jDFaR!gtwM?2j z8!2MX`p@tFu=RC@O470atuYN*2RgMFr#o9lj)zH#| zM?T_ziB7O% zoLlLkl*wF~q?GEq+9Jq@HnkrsX1^-LY=tehY!-m_)mf$sv!aJX`N{^%31 zIVQ));Wjc_ZeCmv(C2Gf23!{D5BppCDD|jv#Z)qehEPX3ZLh8+Oq^LS!WrSo`ePN* zh2PTa6R?Ku{l>EyS(x7;K z>9?n+#d9c=w$P?E3_2ZndpY0g^+0;*Tm1xjC~0QPe?-;1zqh;W{x*KB=7iiVsMQ`O6ohj4hJ6G%!s(KSK7F>)GSaE)tu#1fW~rgMk9dy`;uJ^ry6(}-2E@p zRM*=O!IvFG@oJ8zzzHRq5f<%uRaqB{Si)&jce!;_^Y_dtS5uw`A+|k_{5h@;zc;)@ z1EVSf^3iHr(HX6=sLjPhvy)+kjh=(FaRdDivKV=C)@>ZMqH^8pXl2@dtoxJKKHkT9 zpBv(HU30wldWfG4&=t$0Z7h6Ij|-by5TuB8DmJ2>;`dGIO@frV|BXBE5|HdlASIed!JR}%0(7jaEG+GTIfr_ty1?Nxb--`HBf z?!#@V3PgK;Zf{%uK&QwbGbXiA9MZ79siy(UeG$tgArWkJm1tY2BcA>FP|bYC(XClD zDn`NaHn4LJWSWpdsi0Hm-k<_rLPqG**DrwaqfAn6_i1t3ar?TsI(O6W%2jJnzF$4D z(IWQ7k$W#OJx*ltwUJ`8WsVBChc~oGdYDCtIJlhvz!v~~$oUaho=f*Uz$r}aQYP2| zw&hpr!bawKwtNX*n73TCO}|SA$=%B4p~d7FI#{YNlm=IFHWbvp 
zUy3ALOTdCbML*5iSZXC?+sfmDvh6jgT8ADj-C8?VGQUj}OH@J0=;J%c7?A0A);4w< zHlH+CkdLS#1L3k*{7| zk2{rtD;Gl&af|x%pGQ@jslRvr{5(8lb3b<+x|btB{yBqq8fMWukVYA+pO;bV-wWPbRuIoJ%;CIwPfZ>h zixIZzDJ01WZzKfoJz;3Le6eo%)DyY&9z|4=iZ4g7wDu1GRFvbr$rzx*PfJ#KRnf_H z=V8jnVaa$~AH&~}U8vg5W0e#jtDc86eE5tV8vRXj!As;j!f9x;r%c_4xL0*B!&q3D zPWP&-7}v-7i&5d^@k@hp;urwA9m73w;Cum^0Ky#Px3HlFsYp%ASDoB9C}tb#))Jk> zZs!lZms#X$G>EjhfE~{|58<}aB=d`A)PMR8yLzp{De^o*6!FY4(o2x}nLTNanPE?_ z`l8^eSq=LPgA`9nZXL??)#dX!>dnPm+Uj@O;Xec`g}6!A)CP(AFHYHT^?bG_1fvi z>{u^q^9`Rk)ViiKqKQos>u-9XlP9x{>#~|8Eo8!cN43=q+%H=Zo^Q5ZKhRU5Vu>~F zo$$ID3wEcYO>Q<|=Tx3e3Ej2d+j;o;%3e(r@+e%9*$uoF5ViPTM|`v9%EBNxyk(Dj z7iIawsw<|~DD5qO#??5uS+J?rp>*!VIR`AW=Dtz-Y@!fJ>;A!zcxlw>*})nwko1Ly z((G``nQqfTnm$hysMC$?pxSnCO+ModZ8IR0=3e^4z|yfFB~f$wIqMT-<=@P`BV4Y3 zyMy`|1~x+_c$k)eLYu(M(rHsyt!jjXxhz__>ir|GIX!+x(tc2EB%_z zvCC@1N2pUfz~LFL5e_)r-G}r$RpxQ^MRp5;2DV4=-FzHl7 zS+*_oQ^r63OA;u_HHs1Q9v$2(7O+bRD{o#`v0hP)m=Y3=(|)jIQ)gHRq||A3yfke7 zNjHJG(xS(CtlgHpveQzRl95_nG!2#dbf*WKUy;ERd}4*Wu2tw3S?}z%BhXALDm8p< zFD?W>zxwOak!2~vhZBRF17X4J;*s&5nw%V1uu4*?(>x`)ER697waHDHbEzu};+I4T zv<;HM4$0NR)jvS&4i@=fsS}DUcUN{3^^GHu={VJx)(_{u^`31cL~Uo$*m{I4F7Pp zAQ3w?HWVxgHQJkVUzqjHDIofoVpe$Jh~%y_y)p~hk_}a)J5!IBxbTci$|l$Ny4yBa zTTa|J#-JqNs#H~qol0T#G1Wcg9_ro+ES;Eqh}3^REcb#50J1qgDbx%>DOJFw_jy#H zLYF(*&bKiwwf>2Yn`*f$f#Bb8B!CYbGM)$CiN!klbu7^{Z+# z&IMp*R@RF^nP_rscs)akTU-1l)AVGOwA~frk?o@xPKNwxk6n->_~h%M%d8UV zt0Vu#{i}Ca&`2Xo`R|%T+sTzY#I#R|lzd{chUG=9_^T??~gx7fdVT9@-3J5Wie}SF;c2zw035NEN*W;2fLPh-xz_ zbPS7Iw+>7^109}LGejErEguzA8CE>PQ@+0 zU8uM+AtjHh!2K)bB;!D1j8^?XS^XB#mNP!Pu|Z5u_wc?vjnAJIj-y1Xjg}3&m6Jas zoSZ*iGj!hRe$ZVP{e}LDuCxd9s55>!tYsRj9+kxTM=kID8U}z4bvG_7|&ETy>r(h;Pp& z0GL{x1x$m=>DUl*w^a}J)y8|X!lm4EWyh~rp0iVRPy2i}M1;CXXa2w(bPVx>pEzaI zhTekIb)OP}r#F=SO2Xfe+vt1>sso|*0~9mdW^`!@>NSH$mfZbQWo0c(<7Xu$a;QYv zKEJvec1}He@dheg$znE$|6Mx2!Xo>7#%;gmPF`Q*knzJQi&ZrC#nx2FR>Mh10|u72 z{xIn5NxrhWN&+}4NjH1+mfA3WSR?iQ60Bl5Ijcz6vewejq~Q33XqAu3>LHgBD?B`7 zn{xz#j^`NTLsAD_xZXLn}E;PP&ZJ6t9pdGL4UB~;ZOnP?kfiAml}cN~+{2?C1N)04%kyKsH9 z7B5E4h4Oz0(G-MBqfX)Hx%AaXIw+a`E9Lw6a|9Ir(2>m1!5T>X;_QKx9uv7oRlMh( zDzI_9Nc|UqJ^j6ZPPSoF5RWUG^&4eWEkaYQJqx}oQyKy&KLzoG@3OJGU~)}0zq!*| z`V8!Y#c43YscW~PgkC>Ff@Fp^s>1Mr(zh2vB0r-NW7|#ZYw-56%+gL98^1LeE(NiU zFcU`Rs`Q4)`YuYYE}U?3e;{*@*!A8r($YIR;ov+%KL1PqpG?&t9?pjb`}T5dsVav$ zg#z7Ym5Lq5(++y*{`JhvkizZy3GbX)G_a~n@Q~?BQ^;=s<5R2J+El@eP2HrGqcdSj=v447}N9Frui{#1*BNVrO>~< zWmKtD!nC$!)1*{|g2W%{yA$F5_MM%03mFy)BlV7Mjx!U0_+=cS@%>d(SfmI(Gpqd? 
zHBXY>6E-;B&4+>>;7oLPZgV(K;VXiOiBkGsTaRr4^u=sJuqViK*;5J@S64UQto^C#*ksav%cM`RVq)2~ zG=aH3D_r@-SpG73zHrg3NUI$W6W{3-xDQR#>ub2j*PY$sU^bU4njejG<~lsCgp0W0 z{q?s3MYG<;?Bj&9Nh*u+j^zuj^iPB<2!LQ|FuNLia>ldz?kg4fD!s4QiXLpA800DE z9e`Q^ME1L}L89VgO6l3ta_WAc@EIDsk?g_c@?XXwd(nSc983S&%)cV$?Rb^Bt7jQ+YBnpJVjw7Tn^$m zju>a4=)4lk@hq9aouJzsKE~7ttQG z`8_H=Yd@y)MwxCIdknDP99w;FVZ9e5m>@pbwg4e7{7V){7Tb{t%pON~Asi`)c7wx9 zkDsrfPoCQPi>IU;LT)KMcAKj;v=(=5*jOpBT{M0B|9q#>Xhm)X&kV&{rKQf&H&i4S z0jHm$oD{gSR`=)vn_gdAb%aW@%8Pob$we^{OF~PA(w9@R7c`LC9qt=7&yzj;jik!W z$u^*36`3ywN%;2@3!*S3iTZH;vEeZQ_!om=m?@v-wPJQPnoWmp>%%O|uI@t^PG)}R z&4I4a>dFgOigEXy-}P`p|6PYs@^n4AAy+G>ypWXHIX@6iU%;j4p4(s%_&^>hE} z=qSmb6PD1r0#Nz{$o>qb9-;H2O63N&nOUk)BxneGE?EKV7=^^sM# zuWRs)8=Iw)lCbpHMe+7IzveGY6V_oAm8n@ba9y;rr}KFg8p+!1wcg-GW6T9qW1`#v zb90*ZPt130CY%Y81X)^2l(yPh3QqT?{D8?YzzOyDE}KdKd3!oKhQ-qVLNWbQF0Ixz z0W2tmsX$AP?#JO9`Kb>DkE)xa>qMsjg9t~JpcN5kBwm(KWy#ZQgW&?~hsl+TU8*fa zq)%DmuF*c_7E*>4*dVXmyY}}jBt8_&4ig~tGi{hd%@Qqw7b@rUY?^TgOUHmWU~u1c z%FW2MC*%2a!ohxgv=>`O8rY1-ph+Cj>{IL@BdPu}PyVEj(^to}vhX;Zdu&~?vu<6I zlY1!r*-&cM+;yG(*oWK)g2TKFmT_U&j3^#%HaQMvswwv|HQl?)OiBt3Y}C+`{OgpS zi4RC$j+;Ha(OrR;8gkxV+g9-ws{>DW|BH9*J3qe99neLci0RQ~@Qjk2-ciG}8u%E) z(qnYcP4k&}YHp2#l34?wTt)g;+dhjZvrHL1!0Djc4vhy@;{nktoXHN+s#je-R?Kx0 z`L?!zpUUv0iN0qysuH%l!Rxdki@5_zH=ped5wznJ^Sy|$$nPI~U z!UxH88R$4XC}ty-BV~h46CU1TemS~*T%D^iOu@&N7FRBwsI8ICmHNA=^C)ugUQ}$t z3MO}oH*yQfx2b)DpH%ZS?~cJA&Bjxnhlhr~q>kbdXSCDV8;Bc=!YAiqdeR@H;d`(! zGZSCQ+0d*VCLYb6+USp}K1(dM7yJDBnVMId|6kltJvL4%`6FdQhrYDBv{trkNNrm*O>^PzD7#T2J-;nQqk4=rqZdY|| zZGXsS6yX|-%lih-IbC9_B|Bxh9&4>%M=0HJ}oiUN_m^ z(Ru0n>&OaPbp)Q(5osDbTKWb($pvHNNerj$nN}-EVjP@ z@r3{-n=kvurGyV0GCBx5Fb~8fm+KD?c!ijno|LV8mgOev7Wd^kSE!${m^aH!0mkkC z{a<|P|1U2b0^IRAfwELQCI$??s1<9im>|KItB$hO|EI?e*z%#FlHCoV8}AdKFn-Rs zWYNm_g`Oe4T`=kpIsEIoEkZu7#$J;j`hAi}b>bZHw=4 z<~S}lOi?s3Uxsl}>H+xsn1O}*1%wrh7<}r3ZlB6Bn$2CU{$;C!=hy>6L-pLI?}ubl zSju>i{6lwt`gm{KnH!N0vi{Fk{eKRTH^-1zlpH{Jo)~3cznB;$ZaL>1uTBpye&XsJ z8se3K^!E3WGID4QrzR&`%s7u+*;i!nzWulRDYErGJ@fmSoV0XLcVQ@UBJ<=!O>O=3 zXAb!Tu7)AJpnp?Ou|tgsO+k;_(;R7L z^k!R;vdkxyk%;-KFp}GGx!-{Bt9EoY(`^Z%RoA0~7?Jg?;XGx(nUQL!Om{>_A;OMWSh_J1sUODJE2XGOqMuAPhQqt0${CsnVVmyL}&e1fT_g>&`sa% z6|-brCSdjha?ODu=^s2*!1S;OoKl?@JE&9ps<|pRD17Jp58cjx46~~s$E&rtD+@TM zRcQ8h%lh!()fPh>-qo;<|jHy!FG$!CGNJ@w5DoOgC!ltXtu;t)W4>IN(Gf`36wcI#N27dilT$4=tlKmtSoB6!r9 z3Q3clo=;CM{Cp49XjHOw5+Wb}oC>DRgAmqdnTlSe1k~{GwzBtYx3DiE;_i?L06WTG z(n%?qSLxN}!qsln108gywe&;C`*~4LXaJBj7AQHc2 z)E!@IUc{9j+sdI)%_IAfSdrs-*fpNc)eiK}YUrY>GQVg^@z#~A^Km#4fjM5x0&{;c zIxZX``!j>B^QlUG_K$FnIo;Zs4hw?AE*$y4Y;-!?ph`!lAbCM7?9iXQII|-}{a3IX zRn|}P`_7q`vSazr?~;;JQ!nuM5`zG!B_4q8ZqJ|ak6G_*P35+}rB8IrCqo9PHH_&? 
zVbRL>Dq-)&^PkOcX3R5e2!hKKj}}N4gNzoR)Xo`U+W=NKrgBdQsMbfTEkV&(o`CHx zfNGtVxZd9LZ@MJ;W4lf5UvxIi+14{Qv(SS{ihsz>8FJ5$v{aLUiD0b z6_{!+w}$oOYyE%5+om5`aoj@gqfN*Mu}DlN5s$AmQ(}1IzWE|8@ds4He-5 zXV4ik<&`H+G|@I2Bb7aqoY|J$jDvyk*n-UEE6lUHFPmk@ENIwl#innu`1IOF=M4}Q z%clC4uBu+hu5)%`(0OSOI9Vg4g$69WcYQ#cf7cU{kQt zaJ{_6j>)4O^sn0k*|yJ0n3bMN)1rC zBXGIBsfrSz>q@SwcT~9L%)J!;IT5cy+8?(=!9d4?&)y9NrR;N%E&lux=agr6eKDZu z{t{4dUbq}RY211+%swv3noqaFRV^WWyf$}07hai^T_H4e(w<<+0Z5p$&|5%6f-Q$l z;M*Ud#(olF@Cy@4Ic`a0%M8G-!`^??{w9)A(HZY+o_O!WA03@F+&~(?v5->1uRq*> zQC8AX&c0x>69LCYvZ3Mf#*Qh?2b2T%DvWy1d*t@D>i%mvn=!9)CWN6q^=)$_}hUG|O|JoGZ|rwva(gp`Uz`=~Qb89~FTt_#2L)=zMM`9p?-YO1p; z13?#ie2aGCXmL5_7NlXat^HE*qmWN$`Rw|m3S{Q%Jt~kQ%c0BB#toMRY~B3(ou~ta z^Q#n|l5o0|{WoI;N*S3}zl-9I@c&KLoYHe8mZf|0usj{MxL9P=Xv_6^zXM^_en5)l zEB>HFu$a@~;#s+n)TKjIxANBPzbnSVj>uY1dELqOixYrk8 zxm(1fr}TQ^r%Cj+kf0*{XA^U2^lVG$FAHiL{j;F&9O&d=8NFS8{Q@6d-;R=XTPWF; zS88danwo(buq#DKOHIGLA9~rW!I8B-L~kCLrFrzFY95Xkk>TD2*2cZNsjhbxQ%3u; zjD1T+aXbQe7(lGZOmdPq6B0viqs@i7wcnNVEX(F()h4YDxcuK7a=sq z<_VL1w4&7Qsb+PX;KGB{x0hh`0ibkpvw)t&G;M3jG)-wmb9>-~uGEW{-ocrY43HhsvU&UQ+18WTS#7TUG$Hhfu9yzXs4N(?}Tdt~w!PyfTW0(SO~0 zw0iRX3pl3Ah~vMwpB+-`)<4qY*KD%^JS=uCJYv)u!|quCxn$Cj1U~qaoSdavvW2IT zb?!SKv#_fig%k*N{-Mobl%$XR5wYU7Ul9Rgp0*T53Iv@lZwBXcngx4m-1yx)^8}^g zHa8sxLy@&*MB~(oKEK%>f8J_u^qonN3FT~G^5yvkDzI$fUTV!AZixPw`x!q&3KDn_ zeTZ^cIIQ^V2v*uEa>uC;L~yX0#!}IwD;V(MAvkL_)OxgUiSO^a5H|Z0Bc{InHI)AA zZr#EbrbA;14>r`pSH-qbodEsGx!H>H#72z=ks&8Q15qn!7IGjjpElSPj>0~hNY?BDF1v=D^J*uc@fz0#j=GGqga+^ zst&<+teb4oDy1djj+IlOZZ^M;tyt%HD+@oRe3uflJFhc;{fOyrQD=gIvSv`>hfIDg)|PsSV%i`TSzGYPrcTlpiJae%N}NShtMW%=1K^ zMzu3if_nET7RvFQzxv_v_*GA%RAAwjT&M3NIpL+Xc`q#N9B~7t3N*$ibz5z|chlGW zDKI@d`y=^Y#$4dX??_@UkBy`=L5UlulEiR(K4b&bd8&co#-x2 zN0>w1kpF>RvWVhgUQA`M0|6vrAnj>xi!(gT7Jr@Su%!W<)?!pSY;Og>N4AC)dN$b6 zAABzUB}0Xg>jbWpiY`;+m4OU8oP!|0kc26tejCsMQ3|*7MOckfh(s|}KLG5fC;`~> z88;NY?C$ZKnm0&Y#79~$j1NfQ*-!zIxuP}c>22LBMDND6d2&sy z3BH(;Du^HZ$OSW%&41>p4yLS&Q)K2ROg!~5PM!FZZ6lMVMS-{V{TMoXXS~A?+V}CW zVfBr%$-Gg2q$-Vm`@?C$-i>yxd6mu1NIKIp$X7t$Wk%-nXSUT!1CjGLDEMD%YHGqp zz(#Ca?HR*@fv%lpNDYCtoYO)uAsvIGi~Un)qK?ug#ZqFp@U?^Gp2gTHf1r8sTm5gV--TH%Yv|bTC~jyf>Wps ztiSJz59?TH>hS;G0e$#Qq8Kc-tmcJ=&R5~{csc9GLM71d3`uAS3F#kJ*Z026+yW;y z$h3Ir=%lGCDa9n$)j1!e^SCYvi-@$*sTMm?-d@hlX+T)bMs6r6DK*|Q={;n}#wrI^ zU2Z*}?~Epf1D%LzHLEw>wY9YaWcPrE>kx;n!FY0|@7EU>$Tii~KfnjAET2%hgEebR zFZUN4j;FG+NPXRx`+(A1>yx#E`Ij3VY2DjY9hm}9tm3)hZ(}~6 z>8to4?6Qb1P6vcat`gX`RvE9hd$S|HrLoJAh$8ltrfM_n{UKpClyoWwd^Ns_7Q17= za?ZBEuDyv?=>H77?1DNsYZ6YD4VL)qhJ$<^(myYpKmyp?2pUeH}u8HdP zg*vMsV+Tja77Fki7#PA<`welw`5H5jYPn7m7H0I#NjF-*)S77h^&O<5020mt^pbXE ze<0<~>w2{G(f~e_Vu#zXCzOdw&|$5^&r-ESjgL~u zA7~vOZ*OnkWK~K?*^%YT8+_$_u{-`lx7jIK!2N9V>GAF|H5=3l!tI)>8%YnHE8a*v(hKPpSjbuXet}ke~6)hd^zgHc$+GIOn0-IwjLs?Hd7iN{zp|FlitBFP3@5e7CX||MH0OCwI($ zyhC7PJIqS|?2Hn+drO9Gjev8o+|rQW(BLY0c7EJ`f8>;Gtf$2#N$>YwHsxyq;(fMQ z*r%0tZyr#ewL2d#@6zSP-emjH(NQs|Oy!S61U`E$aL0~$XCGd_xixv-9!@0&3|&dd zP*>nrS5hT`Kx^$S@8obIeYHptx}$-9at5RrZCXhybOd3^O3?(_6;q%IH>cFcY{{SG z>Ti(WoutChhsTR(1z%AKq0Q?(KRr^GJzf(dIn;t!Fr8$zeF7bMYP0t z-A+1uzzGZyvD%z&?(RX-b~$$IUFsodBu~qu0>CS|;??yu$+kFwF5O21_dbVpvrh>Q zv0EW11Xta2RYn3nbkG=Gqu^~1J4vVUPg_`L7q3S2?fDK>JGxc|CwLQlNB)XeqN8t$ z2n))*WWwGI1dw8{03ELx;B37=3;k667lRu z5OSY|IYVaZxV&!cOp4}YI5c?;HwOt}97!W@EexC3rJ+zuQ6M9O-!N-diQIk?w zTk8yX9YMgk;&6Mml`RoTSVXzg2;7sLMFZ`!_=HQ9B&v<%iD;zuh)g6y)7JO)-Y{I( zk4|#(>ZDzZdn$I4q8V`H{vrpq_wGC}=cE4=u3TdJJ zZiRVUT_d{(#q78})F5!To0Ca7kn>sK;iUVcyQ~(TB=82y>gdT0NgMktU!9oH8tHht zXQ>jX`NV(#5tTEmiA&L^L^Zg11`?5bOiYcRGo*Z;R}V%&V{}>IT)A-Uk0ip))795< zkwijyoTdCi_AFzC=Ii^M1+=X{{CO6=qpPdi9h-=Rgmj)J;Omq4$$1}!f7uVd0;+|B 
zxN(B50*#bwYH4UR>^t2*38D;>Jn8sP)bW-B@zK0c62LMZm2Q87K+}3LA;Xhr;=k$o zd!GXwN9!o9#mQzh^L68NqaREyz$8UYRpIZGMMmAv>~^yD9T*^l4kDQ)=>LvNISZ8T4A33ZSrc|IX&NP}WB^kqk9`DEUBTQtJ1wFY^u`97QRPxTG zU9(`TUR!XaPx>sJ-6yt@Ivd#Ske7lkz^BoYO#B|+%=8XlN~HDS3=FBz2ie%^I2(;Z zAWKc>P{|jWvC{R(cq!u^m~OxK>?z^!C-xWWAiT*e3l&ukA4QwB-Yq0jmd9oNfI*n3 zKN_RXF_sLA4#Q=_88sJ(30KZ9xP3HXLtj_zI2=2KKktD<8y-5eKBqItbJwwx%#F%) zJ&H_HEOPQ&UWAsP+U|$@zY|>-2ina}dyt7bQ7!J@zX|bR(Tu0i@bWg?xn05GdKc%_ z%~*t%M|!=~wr8R`ZHG9UU(AknjctiCQWw*oENVV^X=D8r>&7Rg#bKG*E(*zlW90e4 zEU2PxJQ*mc-v?_1MeRbnods-LYP^szF~(z6dPm*mLHv%ERg|HjwU+VpmEZFOMct`l zaWHPLiFv&#Ms0@tMHn_7T9ZfsEkC2wf~3yCmH*}I#2UyI3*1R@PxXh}nH~uc!Ax72 zb=(NW`)4(aGKB&;v%DWTD^8~=+?bfJk;-I5mjUDJ<#Un!`y;x6jcI~L zbD>zzo`PcmOE0FQbN~8JMzf7SSZPR&3n8a0|0<_zd-s7=yQ6<{eImIM4%>kT-TwU$ z^Hl%jouOWu%G5}_Ev0C1%jp^BxZ|bY;X0n7ei?JfF(ok2Y_t1zjvCm9wsR7A#3tSK z_~xdTjVyGCxvy=*hP%BI%5%6z>&FWUJza8-^0`^_`sU8s607T%)ZE0&%*i%W&G&;B zIPM~tIpkTSZ*V0fj4@x$V!=`c>TZG0R(yy(f}A26){-V>99F&BHU?^aLW6^6izd9N z+cOrS;ZF$lTSEN9`2vRj{1cI#t`-#H1|OxzPg@%A;;fw?mZ_ssn#z1_8h#J6p&J;P zVCaqwKN$RWcGN!hZ;GWR=NNg~pI$`c4a0Uo+|Hu{3R%lu!g$L0&SpaB7<>~IqlA_8 zq!?S+;L;D9JJz^3gqXUY8lW&be=+Vnww4ZQKK%2gi!vnu#eWZNx~OnLOAO1U9!Cdk zyD`>*zRWx4AADY(EO~apY~Dsom1l3Q(`agHYBxtF5@KTMx3f#Vp!6K8j04}!-x-Y# zSMWfApJG*~mHFHymR{VB;Ea~28}`E8Y;ZKEgH=K2ugY?TnE%*UOwq`X*9Mcj3m+}@ zr)*}KrmcWl7p_(dGN4Kb*$hp-mG6!uDS1n;+jYJIh772Y{q8~Z=G>51qWW#<$?2J% z&-;sxil(~BNdRo=*{(SR_R2?5eE9Slr9Q~4Ijybe-xVPpUkJDp*?^c(vFA%kOW z6vL&i;t;>*fOhI3eE)4B4EwY5yw~o!b;51_h9GvwpOH7GFZfh5_pg#a;V?a0N`98^38LUpO>Vmyx3%OUC*wy2 zf@l(Rdm<+@X*q2<1dN${rQD;Cl#OSV_<-MXngWh~@SGC&U>uxnd(Pt&P4`Ahz8Tcm zn9}2Pgs_wAO9!4I0o_4a-PW(`#_^vDCIW6Kmm*}k_U;kpJXmp3ZF>wKlHrGHDbONFvG*iOBWcU61Sq^vASOUvlw7>GtoWo5Yq zebTsjTmFY!QW3J!_RK%E3zs9uV5ysUQQAlKIGJxt1oFI9}-L zqeQBmZxW&O=IJ12jOb(!ZPw+mx<9VY?4;aEi89&_Jeoxf=&g_(Yuujplx+5OyBKB4 zu(PpZK9bWQA_ZdCfvWliRnbUaCXuEpOSEw`3#_k*y$IC*jk`zvN8f%MjX z`6NV&M6UbXpv0A`2_n-VNvE_?7pW^|S}9~tA!+7obD=}QeLc+0jMjb(1YM9TMPwrFXV z?2yRsr-hi&p|9^8deL#Qb(QGl^&^SGgvAkJx|u@m7PwM zRL@1&cq4XdXeQmK>TfS|C3NBh!90yoB9II)_f_&1Rt8Xx!!_f#4htUHSKm<&Uon(f zqx&3Ycg`N`v}HZH(kqcmERGffQN52Fll63Sr|YFtduUNWiA+SzQ{sj-PYbTRKkApuKO_jzR()MslReO^X*d!THlH5fEe&%(2PLR``BK zx0gp*kG#@A4Af?POR-)Z9>2=l65APl?s&l9`*>cXK2`~q0Hz!vMngHN-Mq0*n;qd{ z@Mlqe+$df?c{p6xy4A5PXs)VrlK4!r2M-#T4;k=)Gd9H%vr2Y$#SS!+-_;5?u0!VW z<(F}c}p8iDfGC>N+U!FsoS67VDD*q8`fXJUEe6{UR zybGt3+8RMBGa16Bf*!@C%mzbD%`s6?VRC6Vt&ZuTr8E0+t>0{5@BLTYU>X%B(_aZ>CnAGfoprjZt_iFcJ z3nvBbh5VW!!QO9BSFY9*_z(-=mPSWe=kDkS)R;A;DT`Ts z=jG^f>;a+B@JKNStz7@w)e+795TTuY)`>f|d^BcAsAga$&R+TfD4r|$dR|0*x@Txt2vci=OeGUW* z4=O?t4z)`BhDV{4;GQF}mL~An+)(j46$hG{?fO!k(bC5B7@8x+dz%R+uIx?gl9)0X z&A>GZhDoLqK{RcJBsiqi<}sR#m30}O$U<{*TMqS;M1!!U$ArnFC=@`=7Vli_ev3ry zKXHSxFg&^$Xh5-f>1}1Il+ToV7U-!-f!(gkXZd$#ETuaKmU2<-Uep*EeB4q}9UX~& zZ3NEOk%iNQg)#$MQo#@@r|R1#8XcuFMQ-cC-&K+;;V{qKZn& z)eLki6N7H6wzsD`)W5@Uw)@BsG@=K!`Xy>Xv!|H1R~UYq4DnliPON4?NE%v-$JB^^ z)wLpVb87jIuo8{uZVciM6nw%OL!WZ)K@S9e~+*8oyara@na zY*m^0jzuhRFVF8OfVuRUX1hPQcuWw#r(BQVySAfKTEy?_?jrU((glp=R8&@4zD4O_jx{J``AeSa`NQOCS_GjkNc;9k? zOCdQ|mwlx)$AUf^ICt(nsYa~S&|U-PB$xHqM}2ve)^4C$zTmiev)90s=#O!H4?h6= zbDqZL_4`&`fl4Zqc0+4x&Ywe)B?!QEyLpIhpIN&4R+aq%wEa@mYda&tDVgC|$63t= zC1eSz-tJn&lnIIskOw`Xv>9Z)#YJPK2Ok`N;9@Jz&1|5T7|P-uck^^hNVHxvh(Pl` zP+S>^ft3)wfC~LCm)!8XO9GUfyca1Oqinh}H-(b6@=87?fSkI|`_)itv_re~3e>{Y z)Y5VM!u~1fxzlY zr_JlRe!_#nxDyi-%>;^FX4jJ)L}9tuZo)Z|f8^(waU4Ev4oThK33^XH2&>0Wd;ZIe zMPCmD8zki6_oL^tl9?2JK=_TSR5IG&pGLz@4{GTjdR?wec5iX^cSHR3Dce&$-!0_hFo?(W{P{$Rt7vg zk!EAU^ZgpSU5bdt@C`>fa;B)k+Gy{C4^H9dy?lrXC{0@{E5a7%HrBAF%3V67e! 
zCe$%OyD$TVN!aMkcljId&UgwQ!Bmnzh$`04vER?<8QO~u%5-}HgW)M7pJ@Ah+VvZh zm>)r69Yk6k(Q0vOF%=IU*#i1Nr>yManHB3D?;Gb>Mh zUMI-U5`&QCn{Oz`ewDhUR<}|<#qnal+z_JZTTKwl$r8`}|7q{5;-dPZwoy<-1Ox=6 zWhA7PkdO}Pk{(h-!U2Yk85)!Uq@{)!8l<~Ry1NHPy1QYBZ~VX4-{pJvp3AfMZ=ZA4 zjH7RN(j1N$J3~l9(AenZs@PkE_@f~@Y%xzJmr^OVUFzZoc8~mo ztGjBnE3+&N;7`$rFO`2DC|u6ya+4)z;*T^(SeR(|hS45cRQ-|&)VkZF9S1wLvDrSC z@PF>q{lJ9U>MH}%_JZqhD89>p=Thh4`YlESk;;DU;oB4F9;4WmJ~b@z7Tknin1X*J z{7jJQ$!T6xIRT}9mJ5-o%F9J#J@mYkiU+|CPO>=Osdbvye*2NcG=nFYSYsdP!$Rj{ z9M+D4;mrS3@~_CIY_uI1LA5^>#W!GR(N^U48)C^QJA}|k?W_mnh6&*xco4{3x_cA} zlEQECJ%#z|H+|2G<@8oDra5I)0O4ZDi5T5~edhbxH(-K`(S3BnPxAuroeDC7Wh^}C zW%}(fe`Xb9amiB_(Lu0d)$Wy1J%r0bOaHwpc6SsYPKrtrdH&hfnpkt9UF5n=+o^%%dqo@zK)7qEuLw$n z47>XX696U?~`p@v3n>*$8470!U>@$>zbx8D_yL2 zg=KILcbiICad;n>$qL~*by!!=>QsnZcWTI0c>88P8v4j$mSS;FcC2p-(uUTG3IEuK zJ0(Lo312JCEh0nT!TRiu6$eqPJ%e(#ukryCK0=RsMAzNAxVo9aC1H{ zXYNU|Of8fRG`R#Bvs1d$yKR5S#|qlZ41e?3x7y_hbj&VklMel&P%LX z3y`+iM|(6pN<8jJ2A(H$iCY37sd^OLPR|NPFw{ckRN^HnlrC+Z0c)63xbY%|gZ2!< z7-yZ{WcDTOLo%74W{9mr`^u3?#z06&>oK}_@PPr{v z)Qx)>WvvD3;CTh$iA5+PGj7Xpt*-xK<2)8@)#H$=7jYJ(3uE=J-qh)_-w9rgdv>I- zg^g>i01qxvn#p5LT+OtCo<7*E{rohYM_-d$s}E=s&K5CmlgH z-acPy+I8N%Q_v@r3G8~YJ<+rGHZ0=fb`RqgLIMJE$XgmSQuj=i2c_;1%oqXMk2WbRSFRG{R#8WTgnX;tHm9`JwHR zbioKOTaEVKhXQ6XhdXO}{O};>wabiJ0&aUd96&PdZt*M<-fi4uYhfhl^XW%FGc)k7 zsGHDOcX$8y*B1YSnMFy;+6>qiS}6AW7r>w14$jETz>_}v=JFQ~?2Z+^?&bVCcA@i` zoqr)%NlH%{fazuJ)F7}@7U7eg(zNK2Pl4SmOF-r5Z^5H=X-p6@SLK>P0et=6d%Z_5 z-1#zsD&t_@zYZiHKG(%$3(7muo%H317jNCUe#@YF3{TsBS{<}qk@6UF2~M09 zM&wo{)+JKLU~schP1e?u#VJg}EuZeyNph@ILR)pioRMd^@yW|EsMR z#Fx%az;8hU}Bx}0S*UHYxW3ca>SC?24U!ZUz?M#0Na<@+|`2Qsk|173N z^>=d#3Ii(E{H4tnl60KV!@pRVBw^pvrQKbEH|xwyGH17|a}As>RWZJyuU;3GNMmoI z*}a`G5&2zm*7f1-Nk)NAI5zS3c|%o^pZrR-(7yw1Oo+0;*mVaf5`jY zUzC@xoVEo4jrdj*2~bKZR{a;-6PT}_1TopJe_megHk<3h!ug`2VZM1t=l-@pMBi~^IwYIr8#Y#*}OI8h8+C58fzi8adi&XLS{P%Dy ziAh%CQ3f{0J2bwlh~ezchm{lmk47|om(TwzZvB)iBU50Z5`UoN%sJ3YMxhq5ViKVu z4_?{*{eiY9O`TDwCT+DWJ}LCFepIoj-Z7tmf`S6?{0%1(U7gj}b~`)uSIzA>{+$MD zZOZ2`UhB#cv3JJu;j*`=$dk%Y$i226hG+;)X5-J#AI_>D#<137<;ItYlY4A4N99BT z1~b@9z!L$00J+qeFTMR_b(Ptmr{AntZ$K#hQCsG`e{zb=+% zy3GApcQXqLj1{4KzRr*?8yrk{XGf6HESNz9ysqzMH@eA|EA3`I`#SzyeE1sDgyAbs zRxEzW&a?mbyvQJu`}o!-3PBFLsWM@FE0Z<-sM^+@&I!TJudsUJChM)w6dlJgCTN(2 z_OMt$D(`AAlT5jI-zcxbf(%ZY2NPm)l&(c# zx=R)J9;#qICa&yz|Fh?WR}pa!sh1Z&;oXBb$kIXk&*vM4LzF#$tLb=v5>Mm${riR2 zj^RN)PV}LMJbmZ$un#iPq_R5PB$OVxvs3EGlm6xx`eh?=jI2qpH+<;_0|GXjEFq-C z)bYWdXMR18PY(K>WbL5kuKzw9B_q?1VJzxoauq+nN;#s_r_G?+zkCkCJ?z@#?NI@>@;j;Kcnv{t*jfl6|jZfcHsMkGucOou`(1B~<#)ZY)wykeN zs0?mxzpRQ#wn^r;K`6}-S0tFw+~Dg~Zk-O#an*ikVB&r9*X^6NoWj@a3M7-~kb;A= zXBc?~G5GaQ+us8$Nj63TdB)g+HIrQf zXL_XjNXN(7Y)*#i5^fb4L4T!DH4&9JGL@`eVr%k);Yl)qO~J4pnCwa19+lhfNg3Yv z=J->)gcWHUe!qTkZYAz|YU^em(h_9p0R1$q`$=g-O~0C1*F+H$qnP}n z9w$WDs8mH$D+VA=_cUShqBANy= z0jB7BW6P*NF&5;#l2VZn%=(>sD)1ohagKp<?FRFy-MQj-!hEpht6-+BlD#055GZ(~fs1nfzFc@bc)$YsO ztmIfrR8YJ4Fqp`q-z!OJ$Cg(Ud{&Qt;ShEC(=(A6p&$KfBCEHt`A;{~M~vHGrth|l z6`86pQuy^svxax@hd@I4)t#yJ0zK^-=dklJcTE`F7)(%>`XD!YcYu$zr^X8mafgIL zIp=C~=!9O$ogc&vR5JFesu2@`#g7P{M75UMX5!DeyuiwY7Enn+!b_l`VIMPq2heRY zc}PP1+vB@qd%d}Vj*`uaWNn;KNyFS_PfnKM)gK)D6BYGIp%J5fE3f4w(9VK^(JA4< zUjZbf(Msuu8U&ZNW}g%}X0GGU`Fw|Vn}L^IFL!2G&Z$U;4z4GJ4wBIqe%tX>O6}2o zm`Z%FLp6JgC9H^s4gcnp@SIbe#lX+Lgx;yJSirb=D1t=%mbbvXR8Ct%7fUWxch-)6 zj!XeJenOI&y_}uXgiGIJLXl*?QA{6oK=~$s3M#xif8Qjkl0(7tyzJJ0G>a$^v?LqYzcr_Tf2+jSro*EmnpaIvzy%r?65EDGQ5I>`O% zvEb^=g^j^${J$Q`RlPJc&AygK=EVPxYlwFCLeux$80JH}&*5RF~1Y+P&CvDl00GEgNt?NjixOiNWhE zv(Yd{eo979IsocZ4X$r0DO3j>Vqkf_kLL}Y4Bop8UV0%4zzt833+Gn(;%`zL;^h1D^<(w}4;8Bd(gmNdr$5jSG$Z)xVxtZ8jy(;;1=#V(Ap;9{ 
zq#njbDp$HLLwh$1=8bq$Cfwf?-mXO`2QdMvvsYso8BQCGw?17Fd8$0Q5vyGO;2%+a zQ9%y1%J}S}fgeO5=S1x+ifx+PCUvzOc0^LMmOFM(of`0U(S=CQ&+BVtY{xb?bN@QM zSZoTYxh!sxIf~~xF>!gz2n}*SSE}hnysI`YT(WeIAtq{MH5R+mZnm86Vk%!Ol4HC& zL>w=X>h{h`clTg9-E;%*ws2i{`tety^m?Co%nQSU8i2B#3_(xEi1u^Hspzq_dbT91;?t9&2 zQ{fh%UVF61)V&nP!Zi~3&hx7IiW9LngPErq(xV#9c<+u9M>~UI372ULqZ%k7f}*!w zV1=sv>-;2x>z=oyd0H~q1Oz19dfpRHQQ(D0j)KSE=_vS8pB3~E9FD&3XfO67geINk zz&I0tCjGvLUHJ!ne%`7c^M6cNI#Zt z869^UY%e4oJH1MfXU6pRrj#Pr4j}#WEl~Lqax2>TjU@BJ zq)C^ExLg}xZt~Lod~fbkbl~ARQS{FJm7mc8XnMu7Zg1^T`R7q*Jdnc={W%3m$>z!L z(lv>tUsBAo=&DE_vN1Wd6sFwUZCzYmO#{&%bRdoy9mV&?gdMkRtUi0Z;>Elj3+lM8g#h6?unkLGq8%{Z9OEvn5x8 zpgcvL#qy&2~dscC)Jy#&+e-PR$@_NjT_?VwZu zcR?IEU}zn;`<5~Xw5io?ShG{>qjjHPuDDzMM3AXRzb5@qOm(O)$; z)Vf2*_n_WQAx#wBiCo(ki2rqf_!g8_I%XYMJ{G+epY4?0pH{F30ZU|B@N9O;g+q-= zpFpdNVh1M1tg~Di@x6*ygFIT6)}kMZEBk0v-v~CWI>=Y(jAeMafl}somDxwrir!YT zhsJ8X^emd>&*nnoV6`#?!_K2r;yIAz==$YuOxTgyB+Qq5#1Kb2RCevbZbJIAh?eHt zEhu97=%w?kCa6GeUzXik(y#ahMM-|YBvf5dK{>DK473{}mw2|l^NfmL+x3-9O5I{t z&$W=yLBbob(1#hLV#5^kFtsu87IA?=8_b?N7olqdJUf%MJbp7ZH0%(Z!`na6u=o3_ zYObrXX_iR=CQmN_YK+OhiAwdEo8Vn^j<3CNQ<%|!Q0M=Rb(^$?dX>Cnp>Dn%p)vum zi|SXG0NcZa0_y_dsg&JT4gVd+*{Y9(A49L3(V&lm_vuV}GR?5oJx9{1a0 z?yifcHZ4_1{C)kNWuvd>NeNpF6~D$Pt#wLxMFXDAS%UMxgOsI}CARB<9bxb;Tgva1 zKfIo5!68NCAGQk#*+VSEMrNO;=H+lV6TYb9Wm0<#=b7LFpj6>w5;5XQ+%}BTx-?af zO+7E;we+1wCbIYaR7JT7R=catV|m#UXJtvP4}K*YpLf0KezYK*Vi{pd1ezPjTyJz9 z>Fw)+jC}J!jlG_#DsbkYNHM!Gb+ra?&*yHtk_jgbe)w8XG&^3Onc+h9NfcYvQjTbs z^$OWJsV&uRFl`Mjw%fO$wp|@bO{w0apVutd;|sUpzECx0@(|K5pT)UMYDQQO!ppqY z9)f<$XTqu-r+kSWr&@`Zm$Yl{EfH>mUQ=e^XlYX7e0NiIEP{X}9N?eu_{!i+KJ}A8 z$r+Qg_ryyFE3q$E58xFSrJcg2(!kj8QYPuVc7q=Dx$H!t-HoUi~+nX#4{vcyU z;ppaYRLQv6xBjOo3evdQnk&fbJ__d6aiam)aP-WCr$;Ledi24*4ykiWky#v`@;ZEN zjvO^M@xg?U{E?o=BVkvGE3YaxCkmitqdWimb}C(or1PD{NGh43JyMPuuzVC#kW-;4 zXb3Sob%{*@KXaB475!_X@``V=F(>oFz(k#y{Cc3V$yt@0y`f4A zxtG&(-HMO5US-XN$>v($d~4ZLg?!YIwZ~)|Z`wRRA6cC~_Dgph&e7W8#YC*($+s)S zSH(Vu4qqaY^G@p1f^%b^jV|i#SY4PB6&PEYna2H=CiPZ|Z_EJ6T87jWfB$lDH@rov z#S`n31pWBt_9p5zn$|=;=vhKctFOr{w=~tCh&W6VfJ8lyH5b)ub$w zcOHd8e<3Xg=9uFge4|*(ZYjgxf%qhA}~wXj-~{#j>4{4C-b=y?-$Xb!Hn-+I$@lJEX>D3!k3#}d6*t!UVYZ{I-z zB?Vs|{4clO{`&_YDU0St+lM7Jq5Gf2ZT*XRZ)`UXQ-$cl+}0CXl>#z@YH~tsIDO{p z!350wGognLqDS zE=s$?MsOrb=p*!o32e7r+J==5cGdQXt(XSrP;h2E%C!bAoW$!4BciUF6VeD$EhDk_ zl1)QPOtO z9fRY+`U_Cx+_E*)g*?L-WA-hf)g0kR`*V)z z2}6wzfK9AlgFv_HxbkkO{RJUr86Av9#Dl_)B2yBXn}WiMj?VcL%Z?f-WlcVDja|(z zjn&<+=@j6DTO>gBV`fMlPo_|m54;ZtE6*etw%L=m_YRMhGiKZ?Iy2M%&guzE$--M2 z+1!Id8%0S13FnZ34@fP<=H(cU=;Ub5&E%PnE<^wgAr&+}2=v~8Y}AyWloSOsmyT!i zl(^uP#%)_&namCJ1F58hHdAijxjIAbPd&(Lo>6^07tN1b;L5*N&PR*%Pa(^g_2`lL zuEBrU+nNeH>4ag~)j#b!&ejJYkJHw<8m|B2pJizm^B3F0#v~f9tv{SYJW+O5z$GebDryGKk$)mPI!xg z+-&)DDeiD>^c_Nt$=uY?Brxh`v|ikxXVvOa>kfUPcK=gaQnTY*3PshNqE21s0|uVm z;`m+f7FWc`5o5^KB|+?H@tauNOg+=gb?tpHGKBl(*-}$`* zUESo>u9Zk!i{qQ#Q5PLig;X7Ql|5^^6>_-9GZJ8@C0$Nc=wIs(CX%0zTO+Bki>h*m zpcRn&0gM@50(d;#=@C_HfkOih&i8b|f6H`S!A3?r9|M@xKhEFh$hhum+o2g$G8OMo zXe6`#)hwema>cWEc#s_2@@mJh!N@+P(B#t3B~|!+;1DWZ(|;Qa4wn~9e$I|LR~ut1 z?#1NydC9RK|76ui?Pq26@11+=zARbi2&z2{L>j~bpu)xNT-|6`1&1kZlh9JO+4o6c z+v`%B)UmZq5oCs8=dR%RUMqHsz7)BB2O72Jt>H>yX#pEsxPV9eFh@H6PEKxjjaDNi zKySp=Uca8-Mi%{*~dU)r68(zB(h17o4 zsFb+Ptq{|CZ^VMd_hS0~Bmmf?7GO(|;UbhM1+S_EzTRAZ6PB4#KzkwSvJ3rPHS?My zBW3=k&C&(+uQ?SNXV}yG_cP+T#GOz71GL$eM`SugUqct|22=b$j1oyugG`&|0?}&nBe3- X$FAIih38?%KQk-Hs>&2gnfm`fZ_3U} literal 0 HcmV?d00001 diff --git a/images/Translator.png b/images/Translator.png new file mode 100644 index 0000000000000000000000000000000000000000..60f27ce4db54ae0ee169fc05ffe1393d523b544c GIT binary patch literal 1927 
zcmV;22YC32P)Px#32;bRa{vGf6951U69E94oEQKA00(qQO+^RW2^bPKA2%kj(*OVm8A(JzRCwC$ z-AzmrX&lG#XKGtZVb|iKEbZ1ct#um;3dUx!wjRuSASA|%@$AdR1C3$hnZR|ILwn$& z;jqb~MB>GuiEx3|LDT?Nrm2)-R@)j9f`u4?zV4w})&RoHz_iTM-zU9dGJl_Mr_T&! zm|QML06@+Z7y$q>1^{FX0LU1i#3?a?U9q$j>gnlx_G~O1o{Ps<5{XPYO>p&aBej~* z=~M>80gJ_S?%bv8*KcVw8jNL!7+k5|nUz4`kJi>d{r;u0GGa0jvzeScNt&97(@82S zFrRgAcEsl+qod@_8xo0-ckjsRDs%pPo!9%f)mo4Fh?fJAvI7IRjviGq45_Ol0|O+N z!%Oh%!2@Eo5{4mWbM@1wPqC`RLB?XFQn8knLkvTjo5}n4sBx#SnHloiZ$zPBE?sKL zWHMM_Lg7QTV9Dg$y1M#!JT){#nwl_=T{ApPT3QrFqlx3*DipZ*ioLOfzS!+mv$M7d->z8JASO=ox6sczSwz zVq&5+>)p2J#>QVT6WD{+-*8H&(=#(O9LI&j;nmeu42Tf2fB$}q#bUGBjvYIsQdQv& zT_M&bLvSlpe8wVrZf|dw>A`n&bnvV9m{OVl*s{nlI;<{aERVf0Aw;QETCG;A)mm!j z=gsN`kY~oF)-&n_nlq-mXVeQc?~VD$_iVLVWqRvg$Y@;!K$%jNZYW3d?4flQ@R zsV7gKT)TFyx3^cOhtS#C>2|w)K3_JQ75}dYedtp(8ufTQPN#G2DLXQL4qg78OrdZ( zogR-T8jWuE6A41_q24Fwsm5z#XdT`zqZ^XM5F7a`*<>ZLMc z5j_*p^-`Siw|hp$h0B;vy>w^HzvuN4_V@SmthW?0Ue|Mf|6jaBaG zEMuPa3clgAMbC6(yk)(}cn{q--n3^rG8S5|n2f*Gv#c}bS1&T&TfhFXv1eIlEV|w` zJNlD)*=&|&S*Oz})5`{azu)0-2)T%YKdDzBg8rmlJRVSYj8Sll60ax<@kTC!tV*o(L0LT~skTC!tV*n^##_@Q3aBxt*2R}GC7>~!X z2te+X;_Br%E*K0Z5(z@cg$oyCdhjgEvMj69>1u0hZ8n=;ug8J_$}?V9FEtX01bPO_ zGp4m(^bGW8TsrllXP_Toa^ z#58a=me8wL0j09qXxxWQ^6r|LXfPN)m6erGPEKMP$XHqx6&2y|?1>YO#zyAyW!2mq zMzT|}*ujezZKqEM?Dk*7;jmh*#!Qg0Xcf}f6J(t9z8PD z*Z*iVewv(Idie0+(9jSX#@@IHofwN9t2sj0D8EH0Pp#*G_VtrlZJ#sJtG0{}7x0AvgR$QYos{R{NtZiaQv8EpUn N002ovPDHLkV1mlowA%mx literal 0 HcmV?d00001 diff --git a/images/UFO-Architecture.png b/images/UFO-Architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..be85d7b282567075bd89d8c6dd1c4f458f0d5cc3 GIT binary patch literal 72139 zcmY(q2T&8=7xzs-K~3!E-aUKD_k3=guJ&_k3Kj|?A|h%H^{4toM8t3+BEXBA zWP~G|0d75nKV&v{*-U=uC^L&F84WD6DhO3ekl)R`tYmk<#=;uuZFzLs)gX&R10^h zaGyVBB?)40*Qzx{K6#Xtn+kW{j>*4TEW?M@(sucQ|3Nqe}1tc$7j^RbD zmo?o5wOP6)6Y*QsJbEQbV&I2eQiAC80{m9hTd@YD>O~Wx0de(wB5)fIl?%^eEL#-` ze`%PLEH+#jm~=a=(n4VuYGQP17K0U7W{Sl=_UrN%>H?g{8f4O#?mNdHg}!SBQarwVqOq|g(|#N^;@wd>glVom3cj&a?h zCovR^f`;X~kE;zb`%TZRIYBO+x-iYMhTqZu=M3QzS5vmeL*F@&i{fef{6jO1Ne*tp zNB;L&xLIQ3p_L8c+@F0BkKtE9j(@}b@0`4$-6l){u$rVry0qB!s9Vqn);~t}L0oAg zFd{GWJN`F@@;hE!DR|duy|hzYi7;2zh|tb6U?HwuAPN!e=2FoNT&<=0dAvbm1%pO5 zZL`(050iPESUIT=JO=}U;*teOR*Ho6%%Ra+LqtKzC}@~M?*JTtV=br@PED#6+QtZU z^_nP@FnxrEN!+fr-yS0J5Y@hIbI(MEU+rS;tT*fQt=Qk&Frmp$LsX=!-EN<&tCF(< z87)kXxgKKUS!I;W%;-KVR{m;};3ABO#p4!|OzJh^-|_t{1!}t@80_q%a?2siA)UBH z+Lr2FOfUB}Ck=19D)j#S($`&5%fRzQWeWqGFn8O)0O~D7tV=woqqC-%^ac_WkJ(bX z+y12d)gO_ZBry#*W$!HJvj_MtEVjomW#hh0t^(S+=MhDQN*_~8FB4y!m}W{@poPc) z96P_f+#Hy^l);6jNoTo8>mQhcSXwgKq^V+8>;2X?ihJ-3*pfZ0U!h$V62CaJgkkJ= z-ao}HqP>@cA2ILGaVYKHgd2Z75wR^lB7Uo_He}E*Pi#}PP41Pjbl)*FmE^E>VBa@E zjDo*Q9Kf3nC8d7{pI^Dx-TGjeEpmd#nvpFzT0>+V|M5n)A&!B=W@lccYf2>?qe+q= z=A)a2_lS$!VBxTNs=#8CDHU$gk=8rk5PQ5lw}*`rsdCB-iIAxb! 
zX5UvP*^=Pte5{HUm{B_`uov|r>vfFG^Y6$K*dfLOSFKWRrdupZ<~pN*{aQhMi2>R6 zqiy&!ZY+%f8o8WLG%nrsGoXWBw+-s`5F2-}rrb4U*`Z3l6;6^OgoOh7o?JhUA z2-OsecR!jt-pJ1vUtENDG>in>Ip;OQ{DXn>WWT9;=l?t)3@gc2uz9>thk`|)m=yeq zlF)+fy}{1WbhhSaMEoe{Z>+ce?`0WzL*)D?j{ol=b4uNr zseLuX@Mk#`reQ3kt)kC49p!VcU~6uuRLk2g^d3ilELKZ490)q~aIe#W?InbGge04k zxG98qpnCG;scSu8&^J$NcZ;!Rn~l2z^6gS3BX;hg8~MAX?tj}xp4Ga(2Y-}4m$+a1 zZlNr}W?4BrFj(84>1me1;3@JWrzS>XyUiH;{O(b+k6ujHQn$v+-V@ z`)BdVDGQ63TZ(m`FYZ5qrpof^_=c;W9R;Z=6-XFX%G^E0t<`j+{NLgGe>Ymu?(Z04 zo++Zq%_f(C^>;o_%H2sZrOl-WZ_wNg~qgkCZr^JEnlQ zE092sgY5pA#Zre0?a+FgzL!7q^^{4F?yvP z!`r1Df7$F1O4jooFDJ>x4aJECKTm%NKnP&^*BbM`%geL#HuJ=v?B}zQqz?P%ffY^w-x|=45hSt>$*!Z&uwoIML?nq8}$W z(?!j|777*92im3Bw?1ap7dcV{i=d8U?`%@t-+^Npc?x1LRUY11_trpuk(CwJX|p(V zpV8|{$O;U3Bb)$aw9%7!*b&Z}I(1~BApOvKWhpMK@XC5#_=*Z;Dy8&NdzUgKA13jH zU}f0ZRik_)zOt5LD%f&z7if5J`K^gM9xuX3mH1~womN!V^%cZz1HB5V;@`_yzx34lf4?CSd$v1K5-)7i zW!ZIg<~G|NP`iWfwD8}ZIH^%;SlTTEsbmw_Hx#Z|YN6KOk}BD9&-1i7GHP%daXj1@7o-o``1#;*;6dSiSuaj9!BY#t za&CuPhQ!5rG9GD(tiYkOqwSKaDoB4ahuC79U;5{owjuobQP|}(4})l|XfvbC$=*nQ zRbA^Q34>sR_nV&3Rqx4-jwX`BwNaj0kM%&9bq$tq|8C_}wCazn$DpFSML+vk=WH*I zAp<77{LRZk-YopEmOo@UlFmu@AA3JdVppu};!q0xbzAUgDnL#bIX6trD#PFMRLHn0 zrPXJ>QrKyGraF-$7LKYNk&({HYEB=iG`I*jr0)p!;Y)0GJR>4JiG@9Dg1L5E&u<1g zJEPs^1<;GZLVg{a$1H+9B>J5lMUw6!EsBG9FuC+q;NPew+D_j2#863jsI zaJ|C-aTK_;s}odW)eC4UO^)9_VkM9Bylkb{O{+g7hv%slQZWkoME)}EXkzV`Z?}t- zXgvA+f4<-oaqp10Qfwryx%<+?(5l_v=)l;*La0oqAk&~s8;MNkV>FzZnks$`?))IZ zY`Ap_EbB_o!=2gZoVT%($s}n+&=g%48y#f8mFJjOTU>b5$@@gz&9UJwPH5=w zm=P_GLfssrpSwJUn9R-NT(FMf5qwA~&BL~EJRZ;d;*j%`+w`W2+CF{?q_LJJe1l|Q zC_~6dWu<+uZ5Z!N2${Nxf6fwPkv7+7C<*)Co22oLjg5j2HJtRF`Q1EbHAMJY5@Lt& z-cPi3kz>6$1^ZLQj1Bt0vDup$D$QAR{Hie|>gCTAIeF*}%&63+n-3ra)|m_Q~qm5i4zamZf)Go696mlne~b zy}AlTcwWlQo*!A0Ca`e1ZAVJ7%QfQV>n%?#|9a{oF>>ul%z|k!V;ErnF}t7^2a4d~ zwP^TjTb3y{e-=eE+`ap3Ux|W(!g1qg(NNmvoLjB*6?=nOseFrWD-XeYK||18R@4$y z!okn^qv4~b;&m@VWT@fTZN8kn8IT5B8+16cVuD9AQ&>+eSu3KyPWfiWFshb~AJ7ml z+NN$5{gvIzq^ZR)Y^mO%-H)v%_1!yfGkcd=1bCZevHXhcrng6W&Wc>KPP|*o7iZZx zfcPe*l-C6LLB!>|uE>)gA!AL#(4V^{8h}-3mKAT<*`XeF?`}L~AP_qW|yLv>TMRfUjw?p6rv1z|4;Ic(E?ius6J9@I=%w zYvFqtkKC=psApm89|PV#{>BsWDfN-5EyMNMcI_}BP?lK_AFd<&$?gUF#!z7EjI`GT(*}>o(e2AfHh!eO6Q!=x%<#O)Y~jKoj$P6zA$Pendj09-sicvb z>PpRKfVt&nqCRO%8t?x8IOu`iF{O zX=xFkm`edxUNA%AcK5)$C5OH}q-iL0bbt^ywLw9DVwk`h8gXq=m^LB)K@&~w78-z7 zjfN9Vi;}djcjp~iUGGa#(>f5EL(1+kAd{KFvqzkbueVpJU>?`B_IMr}bWBZTtz>p& zahM(~R8jbxMvsLtvH5v2`$Ut6MeAm1-pcVjEGtvQV#uR;G1Dh6l8^?lEP!8H+=1K73a7ROTR%jBLsb4P$RQiSD1O-20dz@LFHZkL_ZC_ygg0p+ z5m%g16Y0}MYEPT-?3oks0Zm*B)2|7}B}Scbto4|P$B)0Ysm09V1ppeZ z(lvC$d*o5(!x|OEQj+&-eQI0sKOg(=mH9hg*w>a`-$-vwF3wWG_h#ZfXDUQz;#B}VB;A`Na4jwBDwvwTr&GO-yzuC64fx-xRcGxmp{5a znk%j8aptnv*0>P*ccec#-mDO^+Gxcv!Z@tPH*_dr8!#dF2*KG?@&Ej!neF)lQ z-zSe`;o}r-T+ZT+v&k(l#wL>R@0x+P2D}wt`pCG+peBV0IbTz7!gk^|4Itk#HsLfz zErvpl@Hly_3c}3EXjlqsP)cFw%z=W9Vj}oGCYCzHbZ@iRavw z^KbNP+yG9Dby_!x;E=G0tJ8HHlCNNJKjJzBGw{(GR57!f z(f%ctYarpF$Dj!MR;0I((rvu=qkp*vXwy*crfK_HuG`9P^O7-&{_EgT-J2-2TIl;O-~@+$ztAOehl4U*!SjqL2ms!DaaVZA1azu9 z`;wAZM?LEuJ?_F=Q&Xfx__1Jsh#Z%Q$f65*$!#_u1G7# z;74N7G+q^D1G~_hJ2|vhWQ7QH+9qsrfY++f_lI0CZmmhjq3oT`nA^P0MUC*u+URSP z{~0!3vDaBy7lC|}r(h-VG(PW%2xy|KH-_~|X;)(`kXRI;>BO;&(jkTiUviVM*lLV_Ptzc9P!Cit%wlPKr@Oa3MG){SMUA9^vX zJ+#V(Cb+zzjs$X>@^ZOlNa5~zYuk;C>Nea`)E?G+(GlEYt*DoPZi{5<4D z$xtaFlKu_&Oh(^|H>|+L4TNqC? 
z*eXjunnyf#C-T+~_B~%gsD2KW*DfFfPP^jdiCfXwT2EHKGHp{hdfkEB8Ww!xmJ!Av zc2;#;kr zmi8X|*yBBUq58=6_NQpQMOm?r?RVMX>%%mIu7#7SG` z%lQqRQbEggTYquX`NrTtKlop~q%ZGddGYT~D`_vPC_j*7f_QAzGbcPQxy|UWXr6jc zBHbh#0Fk*$>~;}!f_b$z@`iKY^m|I!#HAgHL$N7w<3zf$&>@D5*4LETWb5+wyy-kb zjuN*~Vkk+`5QPF1qBa#mmNrX~p3@SZ$E`b2G-lDF&Mqj+?IBX=$hO)8y`Akoc7mhF zb&zH}l*v!)x@(AT`ki8_d}8~ahTm$v_%Zp4c^>`}o2Duex~TWq&__ba^@ z9GK}Se`<3Gh!iovlqNs_#qoNGAV1K!`L}!_DW!LmgFd>Q+~_(DCZlFyAl2;hA-M@b zd-YI{3;2B)b}J#)m>l50F+@g}_!I#BKnhnQFY$PpNIVofv?T#w&*)Yz;) zgC{I`gVB~j5YM{8y3?JvH94KiDkCC`w%=BEDQN~f<7vQo7tuJcKM#V6@pVTLG{ZeJ zC(fDoQ!7>ZR&LBvQ}QDr;wdX&e~J5`C(=zoNZV zsQxx!8K4vtX7xrLcJLtGG^CnP*I6o}J|wyeb21Y{ zL0o+!Rs#z$K@+3ILo+(b^y`=0sS&KHhJ{-2Kbs#Lo&K8jw`=xvH@`zDeF4 zklhw~uio)tW8%a1;nOAI9p^-z?Z&~^+5a=btePc5FJ{_o?G%?W`?<2=#zTz%+k~aY zd^gV7NYHFxXU8t+vGcblF6j}VJux}n>TLwvbuQ%AEYe`2LgHgtW`5>a6KZIsdZ46l zA#B2Z4d~rYnhLQyF%5r;w)TT)#h+OES$?qax0JIWI@NU{WC@*nVKDIKRouCH8YZnQX#cW+|;n9Qyzvh|a*5*!I>kzq;& z;E__g3qfXJB~f?UI%~|J#_g|;c#h?_x&JI(;AfQz!mSKGnLnjmjq14c)*06$jqm_- zO`><1o=2wbIIfqJmBf?6!)B(3F09I+gIUj4sVTY0Yvp-WQEXXCZrbWVN-lX}B~cGy zeh;Iv^KDxkS}wa|o*0?w2aUxAuxIs1^BEdPL&&!)AsqC|-X63^*fn$}Gi=X_`D9JI z$8FvrG}wBc`FPE^KJ_fkdO2V{(tw8b#i6!G?=xnR(oc<%+9HK?byKt8uX-dr2EQG- z0qVAJ)D%z5;1k3L?}Zkfl@Z_)e6Q9oi(N6K{OWB*fTu4Z;&kKod8|xG9$~PncA-T7 zLQUxkPdamLKXIymezkyECPMMoE)&TKZidsvL==82N6IBlkWxGZ9oic3N6958;%U?p ziJWFD{>xpS$wcTOFdvRG&QD2NG zwY$Fv(vb;u+KYhvw7%_ogu*+6|Ji8&{q|p`pZZvws{;?Fj+~JMaViz^k|aO!A!$a= zZ9<-rK*cHpA@~^n%kz_Tm3`{8BGuTz*S9bkZ1-yiwn;KelVffAf>p)*U1(#5`gn@! z{O=VU{aXuFsm#Tk z;!yq_3m6AG39b{oB>?RD43Wf~sxgIN5^!a!O&ijp=`?|%TR?MQAbsrkc z&_>{+6;2j>CN8J5po8aO&^h>Ree1&+!@{PC}b z*#X1Id$pG2;NMA`FGOa5uJtu~nyO{=p~SI7P!Esl+&!leZcQ`e8cRFCW^hI2T&c`a z-L2&Azib=aLP#o7wVz11KJC~c_CRRgdLq2Dtbt!CKC1Q+5s$;qko`_R5;CfkkiAyE z1C%p_AVn>|6Cw^$^pEI1+xmBY}?s)m@07T?8@naq!a_9XUwQ zdSn(5RB@rW{v(6cc`tL#hekcVm3%cbCbk&*?RJo>Mq)?%RWa3~saup|wL&}2VzX`8 zBFJ-^d9&f$L{`}5%9|+etjILV!5Wlxh}t_Gul#pMeVAtfLA(y}nct2e) zc;f?JA-6m#Fj_YI6|c8R8*cbN!oL8#G50_C(!)2o3C>W;owd|s1p zEn;Gt63|E*A+pAnG5}6ePtF)9S>H0i9ZJZozE2a?)EQ16c3XAPTr`RJERnSvpIpAY z#vlx<`HhXVl#XZn?RW5^b8Wv1ZytVrit?@~(JbA~h%enE^m^G|j@a?>Pg=Foz|9?= z)23D?_L;4XEY?}iBbOVd-Q)#nvHsZ*=I%yrzR0wwgpZiFn=p|YO%bQtU*7*tZ_I3i z(1DP=Y>1RLNQ$VXwoH29jEOx7sy#rApyzlF5z~m5WFs~)siL|PU-7s!8Mf{bfiBM9 zc&Og0Q>ar^+M@))o0$H@_@MU*9E^1L_*zfxLziFH-0t_ZO&YNH@}h_cE>_dp6?R@p z#fK3DdIxJsVD6&0YUK$vKj|yWtc49bq&gMzbD3Ya2euFHjje|ru($i~vV=t_^i!8A zN}dirOfF~DM3b{d1i25UJgD?0$vaazU7m=qi2hlmn(_Ir zTRk}~`+_(q=li_k15M`OpyyI{-*03vW=n@ON#(VviZ zf-icbR~EfTi>+|OaqTP4^p()*8WQhb7T+(sT+t>DMBqcGeRhQFW))S4$9fkqKqkoJ zJXy@s&>Gz`Ya3mz_LxINg2S9eF8_DBUnW7f9{;{Z#67t}C3nHre4ngLNBP`PE4}mR zIB%j1#<6+r8FA``7e6}dc*FkITVr-bQL8yjbOce865+ z4I3zJAWFPuIF=*aROC?*A*Y^h=|>S-b6kY}Un2`s6u4iu>mKT#ntdcmG^oXjdO_$N zO($}qRx?bjE1VdDkG6RJ=`L-V%$luOOn=HrPj=%^(kZuJ zht(+ZBxkX`i^MpGO#KzZ`!>IK`-XX(F5>m&H+DU}a9^C}-EV2E~L7k+)}7<+N@ ziw#%c&@`O86($(}%~RL^t%Pij5^wkydnA*@d3-XoHq_J4`& zt#(y18i@>ZNVJl2Y-l8m>SLzjxRc6=67VV z=q8J!W#_{J_$BDSGCAlg=xz-OIsyR3)CG?GjYWVsrZZCe1>2V5;-^;o+&=1bsmQ6Z z{h8`KVlX1}>@g(6C!gfQg$?_su{!1C6H48*sFA+yW+Z^eKub8B&o3N{7xDfEcWn(H z1y0zH0$Rh-DlU)qQx-Z7Qp_J}APYhmY}pMO1?MxT1~rEX!o`H@%wK<`4xi~yS-Y}} zU&I3(iSv5oN%M@dSHCfWrvXxOANNr%M@g`$KG58UBSS|%2uHdvt1Wa^YIAY>=tlW{ z$+IvFn_c-%%D-j~B!ZEVQ6RN?%D(QWy=}b^yAtxLgt&OQcw)cr-1}A^$DuL<$C(|o zy9%O+<)JBFHpNc0d@yyIqitX!zM|WbFbJbybFGaaUtQHd_0nWk7L=60$*hieOT_kw zg+EG5UYFP>^ide3G!+Z4dT0g)7EU$1V&M;>u2~{hz;4h( zn;%4o;uuH92);aI8#wY#%k&t=&&|zk+HdT9^-&}}?;q#VgCKTLg|bhLRkzk1elrr4 zOx&ybyp?r=X{A+!OqmZ1JB2>%COCzf_l-J1{teoQ7}6Z~j@FS^nnM6TVR^x1MvfF8 
zK7s+3NB8e1&+%Sw6-Ly%IX?aD4$|o1>ajb^o+<-)S~r8}~c6eDs2XpJX zh7Ymgii8HcD75X8DAAI~ETKpt0)OxaeDTAtkL9Tt z;Rr-bGI3;ELOP#{U*p+kTSIUTg=X(yrFXTDLC~TfiifBqy!L{lbp146!2kSjwkvIv zL@XJdyzU1#0FPN8hxRW4<%X}gECc@vzhtJcCKJ*8Hj{8*CyluCo{18AWSu>w{Vy$e zUhRB^3DZ2RoVXx(eW+J;^(gMjuGiSCRdKmWQeOxk9~%Imf$BE|e&_Sq9~3-)hIqSl-2%RH`{92&-jeT1GV(axe>leA zc(BjlWDusbB$QtLcs(cQVs!G+B%zf>ViCjYxz+ zUcHEfL=4nc6l<4k(mXPpyq7Y;$b7T#&PX0mOc23jPE=-vJwR$b5U+^RKleAdn~)eF z?D4?j`{7*!2Nr%m|2x^-!;igJ)G-4+hmj0|?T&3>F+g}`ekf<6h-ZyvSvEnsb0i-c;L8LzB%2J; zJ_}_eiHLC9l^qb*2W;vJTQORs@OMA zkrxuLv)izHuQomj1~v|8;4f`yZN}rImi1kb{o?jZ%N!Y= z+Ibd(fpMQutHr0aSotggQVyFttTxk9gm!+dZqD=`TL!pNJ?PywkAC!o@r}h!nv(dx zi9&{Lea_tmG?Yta+WD2GokyN9vY2{FCh$oGkN5TK5^wB=G%lbg@`n~qky6-92+5nr zkx-L%4fJon=q3Kqed(2cv6_;2>&)gw-HeIerCNTb=i;Z*KGTa~&dGa``j3}PBkNsf zJMIrO@Ayyn}%usovBX7GNt3TR!Zf z7iN>z%UsUM;?UMvdPiPzla>ycYko*e$Z8gU6BJ_0F;GcZTV92S(*vSs><&(^Twc!T z{=Mf_A(=%~g7!+$eD|{SrQaNXf{#la$R3A>-U2gsOBxdhXxQO@bs@y2I|B?V$unTF zWP**qeZTz48tsW*@FaOw6C9LIYFm~ys=p;5R@#`|WuM5f{qe4-Irqc$OmOp<&}vfL z=hgSg-Dwf`y40?MdW|zWZ_r#5-qR~#71ot8^gL(8R^gDyZ5ljM$Ny6ZOL6w^ zAN#k@Yb@o>(}WSA%#NnZ{<4EL?SoPOq=1vTW7dKnc_DGp3z+=QR;%UaOzyA6X?a=A z-bRDeg}p?PN_InV0dPV1nTz2e{r|E%B@)vcf`0yKRmIkG-=$i@g~R78oJ9o!a98L1 ztjg?H0YAy| zW`+xT-E5yORT%WGS-D2q)!Eq=jpqONYJgftZs?X0DRoC~@S%YR1QI>&(*>Y}l{2&V zRWV;-f~jwgXRY2V2|WRYY0?>? z>u2QRvzuCvQ&pnLn3eBd5xhCkszd=1LqE`$K$kEIPX6N&gchR^qVdeM8KR&~iJ0Bx z^?YIUApj@&b|GLXZ0NXxKoSg>4ZQ>M1p;dI`LA|nJNoEX4M(DNZ&+ibfaWEWjss#>ZP9JLCVCA%V{nJhOag~mIV3N`?(%23R~l2EFdJqcFE?sM_$zz6(fU5I zj+%I+S@B!&^yBZRzy8wO+0K*W3YwyXp|W%Wt)e86unR zPIyb``y@poHN`p&!wEnK^6~onPnB!wDj+I~U~`JdsSjOMLGQ@H_WZ^_a`_R-!MSdj z8Nr;cO0c!qFTw35qt7wtkk~(TApf4j^^;GAKxXSG+>Vxp-IpgI!OgsE_xR)mNJ*Cy zLb6LX5zjUD&qvF(t!7$GA@0?JkO*c(>LF9xaq*}+V)IRf$6552m3&4YtGLDkU#k_% zkXMoh_Gb;}j6mKmx5Rd%{AUiAHGSj=T}6s%I#ey{>iuzg$2H75W+eYGZ|UNhXzR+I z#{@QOg-`i3PS_k?hKTjWQ&CXe0dbO$<=iBr0^RnS5@iZ)CgB-FxfS0t3}lnILsS_q z5{V;JPZC$Bm;BSGCnWG9mTp2T+1GE{$V7txohbr%-0G2#3E;+DdL$RB>&-JyS)k`! z`HbnE^XdLtLz{0^WWz2k8mreiR|_g9 z!zMt$&>u-ysSFDg>?ZuK?$sHpJCjnh1r&bVP=GAgdkDS0f;k>-8M&?lp(?-XqJNDe ze}9vbW(}0`LUg57c#%<-eobvMnB+Z*8h&irQW9%nwe>?u#6S&-&h z$U8x?gdWRw%d&-EdTrYTnNj)*?R|9S;cbn^P8gt>tOJWKF4mQ^$T{3UN8SOvA}V4N zZpxA$O5A>v6Xx+bv%sW-0$s}#@bwbh#nAtcMyvYKL5jMb_L7mc|VsK`u%Pz?x zs*oT)f|kr0m_J07qXgQ#S=WwVj-RH1A>f~ltKU9>`W?}H#)<^I9ts4cLLAb=3Lhcp zBE~)}>RW`EL-`5nO)XL8boe!p^o4RnT5zQ)P^ z9lleZKd!-2U<1F!%kr-;Q(@g1T%kxE>?1Nf`lTU*-q4kV*duu;a8#J*$eva6u4KQA zbW9eor1Yb-5TTD7u;~KJFS&6zG(7xKH||$?kCZVKD*RL~gVsJczCUp1mmX<4{MMg! 

E7gS$)c;O-XOZP1_z?(XjH?tGo+J#U>l=LdYd zik_aXuAZ6IdoQ`y)r2W2NFjeD_y_?3fh;2}t^xr8xd-ge!G8d@5igDYzzd|4ij*is z)fmws@CLzNTH6T%0ul4yA0$M2CLVAQ&RIrY0&W=r4-<=GP-tQn0)h-eMqEVAed#pI zMN4gwuzx(uB!GlKTnUX5sRzQk1{(^^%0_u!0j(7RvGhu6<f2oJ9SM2q!RU*GsZX*L*<>3r>W>%DlRr)Nd4&HYDq zHf2;u_x9f<`M_>+KH~X08ke4**$kRGp&>$t%z;MULY<$yt{CYZ?ry>!4X5j)Leisi z%B|Q`DpSbunq}ZkOPLvHd}U+^1l%t&d7UJq2u!W5SBQxR5mDAUT#gML9IPcIOf17C zqs5Dgq;&~F7C({*;x|Gzg!Xv~$AX-jrbodADI!RJtUsV~IL-Vz|pbT#Ala)tS z_Jrc9=nxc%?|r{~Ut=4&RMepBxOb*$uy8`wI;C=i^COWGy%4o<$L^^$D+&nYI3wcXII1R1c<>7u_Gr zwY~1o!NEa1U5l#H6DcfAOII~g5a&DYSMk>U)R#-u{OOox;8o9!>n z4_!frF6?!MGBPU-R&nJF6mfsziMKYz>TQXc=@4wz+8zhuJ{i+57S6T_wX}>3fyM4s zsY;w#brF@|+r;id9zA}Cg@qx(=C`&eYd^+huOU$0AfTTu8 zueLY#`&0~`QkNS>h>=oOM$>kqPGQcSEqB=4>lPCe>s0_R>J{YMsl_I^To_>Jhd|C* zLViH;U?fhAPmImXT&eUJSM+(pOG>$(nkFwh@@8X?>kx97Agg2M(c=%PPEJM^rKN)T ztNI~J!CQcUCziump{I!sEE^*vBd$(nnKiU zY$iH0Wy)TI^G?AP>aH9!E z}0^+^$r&6lu@7^SHJ=d=;3pArj z{^VA>L<8noD@~JwgEDe*h>}ncKWt-9s8FnEoeyQ?T;Wwy$M^LYSUY|j-pe14^otxf zQF)J6CZ9;t1PXFZSkwbQ?1vpB!~}A@zQL%+!?iO2>(=Z zd}ckfRk?hd-_88)Xu_;B9ET>^%ag4D32Hnt#Fu+%S7f-ywF@G4udEBsB44C)kcq~9 zZ3Wr$+aZ(G9{$<_NHg{EHCj)`wq<-$y&o5a$9nmBFZ;J0iC(4YXLQj}Gf>iw=}ATr zj<<&*VsE;2IV<~Np8|gq?~}P7DfaaUDtS+(Dd}^i-`@VfG|C>F`JHv<{lmr1Sy(SM zvug|Y{TPGok(#o0yq|B@?ywgwnrN6^4x?);x5bv7{232I%-PH4=Eequ0Sv^CpBB4e zaap0!4!<)&D9=F^(cNb1N!T*4@64vIThBk7qNnFu6}388_%~nMRo6*T@Ay0VEOsIX z8Bh~S;|m|P-`51X_Af)e>K~nCwAiKV=%3~NHjY~EVqICM;nsTFE0{C?BBRi<9@fBt z4`cHk5hdgH=#A<*OLI5Ti|fFq>o~Sagd2zWD(;tDr=Rz;-Mx!k%u2Q4FTh;mK8TVF zmFz^HiOD&Z4;>Mo-!|VsI1#g)s@*`mQg+4E>vT8!bAZniP&59asJ~rfp1t%rmFV{T zz(Y5w{wbc)Lg$rn#H^y?8FZ-p{#P|wiPF@;Js?wdycRKQO|WgYxeF(p(;{MgA33Y> zYk3ZgOh8e=?7U^1Q_uQ7ddMs*!&hoXrHP4NOu~-p*P}Pn;b8t9wT2QE$31pU?up~P zYQwjcrf*wUuOHfNlf32?v~_hcgGd-4AbyNRf~TD#5S`fOvVCd2T?~anK(oVY1-%_a z9EV;6xl-$YM{Q^;e(A`ev)*9v5du1Aou9b1b$vbCz#7wT zEZ0*)&%}o8PS=NfA32Enm#!k~Ss?Gv{pEn^Eg0W^e&JA7P!VrNNPYbE6;EiyQ)0Eh zT85O?Ijtq;_I+;-+}$R)Wf+=>>uaj1dD)yv&0w9Y71)R-TJOoOE;CV(y4xGjIjA;B z%}{Nm5ldTN#KPh_vNZ$NPBIledtf-noSK>n38|q#g@9P4Ttt2As1)u|d1xtPkmWfc z@NC}&{luszFoTg$(YUBBtEf3E_7mVzrZhcmG%S!E#vH^7Qd&Lz`^2GH;h-D9a@zYOy5gcQD4Zh5bhc`f%Zg;(o^C|8s){!1;<~LFWE}Vbn8AHaYc5vrc1-5d2Jb#0 zvW4mC=>dRICIrLc$lkqlB*m80G%ar;YOnp=BgE|vpe5wn_uUkzJ3LAj>dtSFX;|#C zNA|B-k+_F^rx81y%r#@Su08ky`xdPENwK}JhZ;frZ3)E{uCL2JKJqTR8r27T;gh?G zM@_xkjz3L?>Kpn$u$+v@Mdc6DM%J&FteJlsCO$J_?ddWb)lbXaEH<0mH%nNH4Eqwl$TtjbrWq*)r)g z*>Sni27A5-{%gk)=bxQ&h@*f(%?yReL{{FBvMc}ZX8tfb!Ll1kNl9brFy8@(H2~59 zlLiU&ZX+OZrV7=&fQ_>^FWvREnus8NoW-BSZji}n!{8AcJv^)!ia_?2l`E^Ns;jC7 zJ3JJge2}!$0Ozl@sZCD7y<>)qpBo+>cWVK*^oh^P%FeT~M#=w#qoha>3wD6`0!A18sjZ${Dtv*9AArJ~sH(1PWNb`fvYD8O#llkU+LV-?eYU@^sjrV5 z{alAhO6vD~cY0?-NJ8><-udqMTMw@GZGET;9!j>FGze$H>bqj=?}$!ROsfkpLuKDx^I2 ze-;+B3=Q8;7VA!vPd+~JsA&^Pes$VA7);JS-`{rc5>HZ3J(|M1@^Ehyk{+WV@jyNv!xa^upymhVf@ z=cXTo+!p+<(({57M1*pYQB<&Sax1Nx($w5mt;GBVP#cqqY=V(#yy1KP`>jobV~`Ta*K7mEJO$s7opW7!5^+y-;<)?@Z4hLQp{b)25H8pGIB3DPpBfWOpiq$PN zH%xeMf0OeCa&{NX+XNbz!9m{5&BqKb^>27BE)fLyGNSOiyPNkXab=agd*kLpUdcJ} z@tEjwO`a}PIWc3AdgHah7CN@lF-@Cmpez6J(aSI9EFEtpuxY%0KZ$4ZW(U_jh zSGx`;f277-ZdOk2f5!2%S)oUeXvfn%(Za$$KR=8{aVn$y{XkiUg+=H6t--)RB0$xVU!XYRyQQ1k zee{9Lw6d$LB|!-0ri=L0GEHt%wEVigiuQ61EBoNM4{@68JO^Gl4VqQi(EIE4!pNw( zNs?#Kb2f5Ez1JrBc?b=@KrZ0OFJ7WAhwn;{QD~o@dhZ_y((Ape{quE7h{&;A=fzYE zJi?X*|JeWNG+n?A4$XbE5*b#wl|7RMb%uc32L%cmX}}LAD_0gtQkxwUtQ^dSxryZ zGvB{z6KJbu+1-+O@`o~o&)qSh5Vh!!{zz45H}&1L*O=8E@D`@#+=LQU2KtXD_`DPW z-n$=t9!8$L7=yx zBye1uNf14O=375$eF6>D?X8vFDt&z73riG|z1jzXlrqJbU;xOxpiJDcNW}*p+d!`$g z4^?5M%Y<%qtzak=6y)2hQU#sdfhD7FrQ#ORbvp zEa%TgT;Cv`r;AsP=OZ#)Oo{kh%G7ky_mAlDF(@|osOdg+UtiGnx)>4EAsQdu^{xsS 
zaPL3|ad3+sEV6^Y6dmK%)tgY!>_0qUaaBF82@ajDPPKbE*{(CkC%<)AonB`us9Of1 z9rdK@&<-*)GxkI{a#jS4h$FX)xR3%R1eOWqoR8X}%?p#DY>JV361S#|&u8^>c2;4* z+}WXq$WbU39w0$(ZL3)f?PkQ~?Y!&6z;zX7J80Rps~pEyQC9oWy(#4V0QgoiYZY2* zSSyz9P*hxlR$=dli_PxP$jFx}0a>qGOLv*Id^lY4#GDu@cvI6&m|TrOHUa{@ASHdG zdUM6JdXDoqpP^qOvZ}>wD=P&CZDdY7l}%M;LxPS4k-c5XjB(|qf1^p5)HTlXupl!= zrQm%YF35R#wFl(8y1SS6VY|l75^{1_?4hU=7H{L33Vy@TVFkserz`b+M5eM^APelz z7k+0nX7%O4Zdi8Wi$bO0&(lb%jj zUs>f|xh5J?6uzMu^%j?#O!cs0Xi9@2r(?6At<1G~JS{;EZheKliIThBp+TIj_mAeWdk`e( z(B)on$eTXAySuZQIvdNYRcU&9D3wNETls*F+bmm7OM5Yv!$KbSkez*VVehytxxCMt z6%ijlRi<9PRIqxQVtqNfTqHrs<(188qVc0rdY5l ze^Lp?fFkdC@hi)n# z%Z96R1@`VUPqpM7r-cf)7~4`V9aq86R}Kn}^^Xd4Nnr5f`9TRy6bcSVZu|PIgC=Ink%#r#WyRockMBpkQfX zpkK)Br;9Z|1*P)~p^MJWF7!pQ36MCtFda_@dTo5sUUl+a<)@~YvaMA=;``?h2f6~H z&{~}KLvJLce+D%+4$YKCQeV>#C#y#js3+vatT$giQ)~bZF-NX7L#Wb-2M|JF8=4|% zTh^@p41B(BS`}{p&A}q3QMjK96WtCQX|0OvE#!B!l<&Wv<S{Vm?c3VoE;q<{rU6f{(RkyOqIITYMxnFSNC%Afb{zJO95rq-Y6RiN)iFV z3ZOPuv-A!}grKGKr-Y&DG?YyZN8D}r2i4Wxgk!4%Q=r=LQnPB}0~A$>n(t~WHr7XB zVbad;q0TOxX2xA-lYtotf&5;{(Qoh4UlNFf7?aXvQ@JWVS9owlEska^NlAyc`!PaM z!5$nPW%D?6_4P@vaI`@FOL*!nl#_~SUeEN)@+ijF zvgzmsy&mJ+{5sX-k-1FZThrKa9Nk!I>(#tpw6?BHm%yg#r+1mQlV|GX8oRr@kB^Ul zq}Ft*wraAMA)c(|&@;ba40dnn>Ht|1dk?o^YFjZo+>a4kSy?#<+P7G!6L4~>Q^dWW zcfur4e;yk4`)NT$MDKR~c6Tv;x~wK8b=Y!04IMk^jhg~qtm_n+1|Ee^(ht zNv(d+XE(%uS;gxsA{~}*Lei%o65(X6W~FHHEjVbbf0x_lr6|fOpZbPJy`v#-$f#6u zGxFdDkM@B;9_*FVi$iK@^%+Q*Ds_9W_dYi?@H#rSP3Btxk*jjrDk!#Hke#*0ekk#1 zt(_O^qoHC9yN{=fl9I?Pl$Lhd^SE$u=et2^es}KlBL{(@W=h>Rk=gBj7d|4(aXx3 zk(iPKw+#XDrAg-QMn*#qrMQ9>4z@(y6NRXM(|&4t+K3hc;(%n3I!+pk;B^%Kk7(P9$Cwng5r3xu?fRfA;@0rYN+jnW^ZggCJc??o1o0@%5tz z&9;s2ZiTt+uH)$XyJhL^%PsXieyCcJ=nyTc;Nq{@S5!}?a8~=*&9h0S>9As#;wtxN z<}wQD2lU+F)Tg6a2778iVwDGH5jUx6ChryWHnkqD%rw7Fdyxg*$*Wp%l488G9lY?# z;qtFC=oa^NB9%RNT&l2*fb#;`y>QHY{G2Kng^xEzvPZxaGb2;%|6kp7bk-23zgJ-& z&^LGoCrc+;pv!mkRyb-0)g949KkZr5v#`WON5lS)#-l4KDXA>+X{SuCwOnJ44Xr@r zZe`q-mo%Q_neET?pyg#1vZeuHOd~>DGdt6s?hjJe`9ONArk>NabMyDv2~sRaQ&M%E zj{858m(?9KHu3ABN2xk0g}WJyF=&o=^4QMbyTCq795h-u`-_G&X0}3gyM0opzTH02 zRh~d9T)|}Cv=%w}8Gm|3Tp5FdPCY8Ss;X*35dy+F#^h6wwYAN6187$fnlA4#AaE^O z{gDnYs5mSLrrzF@fHlB=nb}ccd+l4AOF2n&8;6a|S{3!!%qTallN?>tYhqMzP*_TR zT*mUe0eK`51x=7K@JpKmYD`vq`!LALR9jg&dVtiBVdU!+8&~!7!o0Dp>lWPKb$Mp0 zd@D=!_zdRM@$~WSJ#)$%F8u$lEGEnx5r|fnG1suG?kG9Btf8V3pO^^E45%{)7N7tf zDqxs4JZHD!oOhB>DX1mkG_&B}h&8?WwK_h9B%u4{px*Ot`*}vVzN05-{vM#W+CmTy z+??Jr3`R)x{l3z!dRmhn`oBj#olGO3>?WrNsrY?c={*fOSx8 zcQ`h46*J=i6y4hPx$0`d{Ad?8*8Yx!H(C-y=ngkG||`og{Len ze*2Y7Tp{y*W@pQWw%AdZpg6Ab*RgXzMB*VrAwq=sQKb+G%)k@ZJ|o(L7%_^|AQi>e zk?}aD6i`iKWSN2=QmEhQyOb%&KOn@md>8%c{iwf-a~`FSc_Di9qSfsjw)#KakTA5H zNw4i@GKWTd3E?`e9E#;kHpWML*ciu*8b*J?1JLvWa&)w0i{9~;&6^Z&-ruA8lZP2B zk9;2SG;~#09i50xB3J2AQBf~|bq@3*&_#ruphhJ z0h2Rv&x;gQIhxj5erbS&KtmUmpR)YUy+1iUkx#zGDL2#UM>6<;_4^{WN$d7#?k|tWwix%Ic4Z{c(*X9Dy4?YU=zhqq?oGv(5`aZucQ$hq{Fk zd6L3H(@<1W*nJp?U(@Ka;N6`a*RwUJ^ONZCc<)+xypvnesuSWSlcu*CnJung7-~-= zoT2w8iwOx(s`DQpCI-KC^ z?+ZicMsk7%8czJIC-(O=lF$M^FRc8$)HBaSsfFrunI4(3sqQDKJO%Of4m?k%OxU7)MUW$TW#{VJ-=OlY#}xlc2bpL=bHX%Y(j$E*RQH- zC;5fJmHH8%)4rueV-PM|J=g~#UK{l$Cj1RH(KFt+5d?o zrV>~&+4#cb^_+o-%k2GpXM?*EU~3LViE$_=J7jmg|5|JH7AGM_FSl8-$_V`dl%v@8 zcvC2s%=r0izOyxZJ~=KfEc$Kd%=c)HiH?qLWGWtw6txdDPIYoP-OkC0-`8gt9`25g zjt+>EZ()13YAPzS@}o-~tBb4p0kXY|ijS7(`mCS znE9#`MM#E#lvLQ`YNx*brUNQ8ExPV}(yqXgMXbgm0n4N~(}2H}jA7Azn;U!)cH3ktDSN@spM?YA^H4-5{v1fU^i4fpZwZu$Ww^mczte_tP4+Esn93dCYW58JRRr(Q;kzeKIN5{vjOG-8ze}7BtQ=-M<<>hs8anaXbb=dA3 z&EmiB6&cE0J2z!mm+N=#1HP*VD1qH*bdF(jYw42>d{tAvGxH8VT=d~?WdK9&i< z2Cil&E4@IE6^M)eCf(gaz>ctD8>$%a3i03JA0|*h_wC;Xf^6~MQE9IK-TdD@;l=tJ 
zD>U%Yt0+0+xEkPK#(#8el~klC17 z|9qt`23=n#`U|BIUPwPCEr$$pW+qYP*>iT4Nj%T^i<&l8&p~K8yzIOV)+Su$f={2K;KC@(s0O!w^y`i8uEU~~fU-YX?_Xc7>b{%1@uJ6VB8?pt+?6F1K^ZEqehmF%Cyc0$z;UBN&v4dMHZjydH5Z zbbnDX2L}iE&~89%T3=laR4DF#fQO2S$?kmJ@i^0aDNcRKJP8LI18t%J*#vW1Jc3)@ zM*FwKK}>?a0dMDvl}aB%969#HeO?cObVbXmtIYZbhA}5_d>*j)+>Q#oR#F6pCviNq z4WDA_-)!bh%M$gR2-$`s+EIAD;a_zP3^t9VmEPZ8dzHbeDqSgzwaBRB>Kc-*ZlB=b z?2Ea!&!SgWY63oRI2J6^oA;_*piCRR~y35}3u=01IS+5WDtY01rs5`NbTEg7!$UemrR{r8)*sIhE`tfeNyxQ%c1X2{G;$izY? zV4LN}oV;=Kaptv}&GZmhK9`U3Ss%Gh(A>^`Rq8tY{=ESBxD+N`Sg0uoX4F$Y-^YV7 zPG|~`fuP^28YfBdc6TdxWPGi4IbE4ovEg`;(vz8%Fw83fXdV0ZMQj-Tl- zShBl!t@J+I)i*TF-!pp>J6{9+>wKvg9L&M0hPvs?TlxEoAAiacKAkb?)&LUZddqD} zs$m8n&rPd0@7;#T&JMKeF~{AR1RTmY6cmQ>EPl9rWhq+(r~2LIAXJwA(RBf*Rdge< ztNI70j3R6^zfFhV-PG@m7VEcMoJTy`Vv-`GmrLb$w~t3>>uW-7#JIS+DVXO9g9dd) z=fQ|M-QAP1oh8E60hvv$eIOqp^%bZDOL@dI#*D7#u45X9^62Te-cYTI3tgx>WTH7D3fx z(}lT^yKydKjpSA#X}f8S&3oqeL-5cmC=Ob_Z1vgQBDxk@BOx|+Vl@8s={C-&yYcxs ze=uS50R!RfZD5H22pk-rKcjb__4auZ5LS11V2)-XHew(&*&1E%=K~=I=(D}{yPn$U z4H-3kH;N&_+BG%8i>4Gw`Xv)rTCbzwqAr1u(Ytf)P6-x&7ZJNZS^5lW!;T2NiITsn zQ;mW3ql%?m^8P%qUCCLzU@h=@smf;Y$5mcif!nxzg*898j>;oVL6rkQ#-t+gCbQyg zj48tUH9I_@q>|zB1+?OaXT!q7F@`&2)HOAGdVVB{|I8)NQs8#KJ(~6V?)Ct`>Vekb zF*=obLkS9zT{qeW?pj{nYLkNu&{|uTauf3zGo|uPjQzEB(M*7IywPr=O%>%@ud5vI z=h2@Tm95xf)xWxTKRv02fP!+SFi4$H+v*nAz)7S*v6JIB;OhF4o=$LiOrR+mu(2C# zZSz-_qGvgy@f&1lukkC~9Tgjw>0Bg8n=Z!%7Rj*wJftmLX!(Qkx~WsqH^FoW%LqRc_dv&P&QN5ADNl(Uml+hZ!tF zs)vu)rr{dtTZtUb*KC-k_~HCnZq|C8RfLoos4NX(BuJQsgRpQ9!Ngy*$`;!;HB{Gv zLq{H7FCK!I`Bkt(NBL?kE85EDrl7d}S{xR5ByBtG1V|JwcpFYl|8PQyZF5&sP%e>N z{V?f9M~p{CAp;MKGam^rYz^M{!*=h}Uk!%|92IJ?TABXL-l>^KBEs~L^Kyg>SZ zh9DpW24^P_5jKqPvmdzl?5K0-2ZBx1y#VEtkdP3#9>8(cjV&?>ESL}P?w@*%YfaZ^hzKtaJFR$NWb z!~_Fm7Qeg(q#vzKAg6l~BEAis7ZgzbOD|gdbzGxVQ1$WPHvh6#w6qWa*~SmsA73bw zargi7z#~*A_ikSX=|MsMJFb9DCInD#y}$8Uu|%&psT04D!Sk-e|I4MazPO4!>X&ksqw z?M5HFYRWaM7Z(=bRY@>DK|s_e_^nf;ke0~B|7wLXojv3f{Yfbzh&cugKv*SGYhmZv zOiNx&Jd_Kgqw=Zp-=fhS;P&t!;n2o;CBw_y+c{=;)I9kNyb#PR;bhKpa_sTgmu#a} zlM@u*obPWJ(lauKzp=LP4Qc#`jbPudxymziDhNaVsvJobjOnddus=gblHh-MILX08 z`3@UZSGVklLC)d2Cdo_1sY{-$@``}*$?ru3Y8DCt{qOeWQZ30%T~kgM)OiKpT@!Q? 
z%J&5n<4*ySOI13a7aFVFaww}FcPHAuz8y;OS!rpD+WLcIZ|PaD4Vd>#eChD8IN4oY zUChfL(YW*S^7Qoem+NJ0Y?uQ7*3{Pi_I>pf5%G`JJzZ~2$t?n?tdD(dJ|j+icAdA* zuXX_JEIT~ZWZ7XCUs+lTGBevdJ6j7|TGj69`9ptJsj8%8f4L>Wz`$T?y6b9Pm6yk6 zyV5+I!h(y9{rB6Ovx&KR>+j$A`1r^K90SwSb@I#cgF8b*vWAB15)n9b3=A0=833=+ z*W26RsQ-Rf__qdu^(sqF_!`02*Ow{f(e)XaPVMHe7qxcXHhX`G;soe zTbP+{Hq`U<^)+1YYu=t4F8@Ns#%_+l1u5ppnwV{AGk(?2va%d9Xl)eL&szRf(8N7E zpMpUsXTs`kX2w}(&i;H`Zo^98;LtOi0yPHvHb^V^>#`inD9|43eC4<`zhs|6i=XxK+$+@sI zIBRRWJ_ck)|9|}Dbh&*<)_RJvvHZAc6j;fE_P<7!($Ps3DsmTsMkejY$A@2JVZ!ePHVaCZ@5iPBS!2Q;;Ci=nJIIDoy61;W z6$XajCvJ&qgF^D4z0vZemfPi!r>BNu#S}&z%9Pj(pP+<X|lq?!T{rj%VJRP4T}R~&Bf39Z^VA3*45TtTw2o6)s>TrC}1)Qk(R#Z z1ii{6XelVXq~aNC;B+uIB&CG`G@%H( zy1oW%OixcQD=WKT-AD!q`5D98mqSBCW##_{;P-in6o4LML@!1Wp&m0gaHgD`)y5zH zBLcxr@lJ@iQ$1NQrbDwv_SUCwcyppbvjwg|!9XhLsWzbC{Xa0x(nYSS=~|(bT`zO4hqWLx4Gp6$f47Pfu5S+IjR>b%K<9 zg*rrn@9vO`TlH)6jsQu!&IzAjuESdfU~(+a*RJ?v2v~o-9;yOs3W#6Hag!k)lR+F7 zx}~M12=M;~pcnA@EXiZZ!?TNbWmMPR78ZKsOU(;-)1qUEYj?b43;?{{#iPYYDghmx zL4O7s%r z#Zrcb?AO*3H@?XNH@O5Jz&)DvDwNIeW6)Gox9eBVvIR}+%!k)I6%pWn-JFj%)_l{| znnA)>5wkc|ppc?hPZt?b*#(RaA8F z3%f*#M5Q&7*J&jNo+qEJ#Kf*y^z-1LmDbunwR=fjUHoF8ziPB)%HdC01ZM*&8!&Qs z94II#wg55^a9V)((%+5-gTd3qicPh(we|IsaYrQ0SYalJ|IHJb;~2vJJf#P__4tn; z?LU5O1GB9Rr2A4& zjQ+W>n>H-GlBs_1Ql1eh!0Xb=qtnEmN}gR|IP>-Q3@R=9?R%K5H;ld|r>i8qgk(ILC@u@mc&c(` z)mY)=gE)mSXTxSlK!w#dr6Qud1;_!1rRwUINx;1ZiQM@Rqq5Ry{wMmUu}t2CtgKgn z+O7g1XBnB&x;ukRZW2Wh^hPLvrS!e4X> zk4;V4Oioy};$H9&-B^7RNm%mawa%V?e8#<7anxU(tGU{l^8jGJl=yfISunqT=`kyp z*{7f?0~;Q6{|&kb-`AjaaCy~Py|m=T$CzSsv7|PS1WTjbgFR~PdwF&2Ii7N-lhgvT zva*r~E^(hHdoIRTxql@jIrv24K%zzhIi zmc*b*PfJ^$PXZfQYupzkoQ=pY6*}QqcmQHx|GQ5f$wMBz&Fr;MpI~_vb+C!vydq&Yl@GNC zHTw6zPzdJchd0h2B7y;bYVVqif|U46<&mjV_WGT{lv#9OI6^}`7-5Qz*yQm_mX~)e z5H69^$^>A8lajR5um-!u9QPFtXVgSZckijG6->y3^z;faFyH~aErn&!X>Y*kH-O;6 z0sJ7B`bAll23{JLb5FagH#iW@Y^7Xm5h_I!hBktN04}_M?F8?2b}c zpi$t4M|u#W{lvPLqDh+oHSq4{#@aBNpPT!>D;QBjLjzjWEhPE>u_8sBumeST5iGw? zx`BR_e0mQB#^dQVnuD08rW=qw*|hJ(pDnOwJaS?og&tdRk-w_$R2}kV^bL^KobT7V z1}twGbrx9?>VD1c+zxo}Z8tPFKuE%ni4thAo6Z*W(#i6`CJ$=xqBq!F_igjZI69zR z-ALkMOFQ3E;qv0Dn+!iokDOuU0CCa;`dsj={?P1eu_x!>6@` zx$^Oe6SOx%3ahN1znxWs@)A985$(K02Ryi!M5zldwM$N0aafmXZ6omSR)BIZhv--mKXt4C_YU5?LkOMpzz?$( zsTA0fsG!s1uPP1>mawl;`aX*dG-ID`!rYIr1ckCwv!do_c8(5bt8_KV2WaYLC1Ka5 z6G-r+OH2QH-d7>R)1EW~%*EB!#o5`-f|jugMx5*}$jYf$u{xW*xJ)GRa9gb1-ak2A zHa2M&nbe@pR4%xTo|`GE#<9@aTR=oU>Um4TSHEokFVs-6BD9Cc+C;9kmse33g4YCD zVJ4BctZekgX7Y>o%kFS}C@Phs8d{=uG}2IYtBDSE#M5HUQ(k5~Z%9h;9@ITi41pT+ zn$Z_cOo)X==`BgHrUx(#SpvRq>8iTyLuQ;v5(x`-{8?E<`r1qOY@TL&aPx~k0(W*x zb>@oF($X1cWC8z_DuwLOU{#ALo)3^Ac``tECV}n-_x`=4_1lp5uA8O9`)J))6Vn0} z6&2UhrP(xfZYLNT?P*# zlpV?R{DS6#sY1EoWTxYakK`$Uw8wwAKAuWRj(+5X5|Rh<=tV1UX<6k(`zMs~;aeT! 
zL*g3Qq!-aCgl}9wGL3}%r?sUx-VXIH5$r|1${EoTIE_2S`1_0OD|~?bC&WGHs+9V6 z2%$sHFL5AFRYPQ}K5}g>0u(QQlaBkd@``^G>0ihPW8sMu7DHEi`w3zm@7XX21bX6% z5VV%sMK)fc#_8g^P+CBIiw0XSf`P7bvKf^%$*23!?Ga$Q8+v~M8?1!8zun~cy&b-& z@`x{@nTRAJqfBUL^1Gb@&rkz8jT(WzSZhp3w0|3zT3&&Qg@yYwJqI`E8Wma8m-q*| zyWO)|WDfkt!QrKs0Apq6i!sv#Vq#)cVm|12WGO~zE}kFF0J-gVJXg7~v5}aVcy)D^ zmq$Sovp)St&F|;#LvS<+{x$FnAS~a%{|g{2w`X$fod0-g6654AA^KAo%_Q4(B7;^Kj=ZIf3CM3%Xrtn%sZ#Oad@;~I(|Dp zFHgL99?euN7Dkzi=L@c~mR8AReyAl&er1ZG^977sb>MKNCh&Z>CWgM9zO~mjYe!z* z7En5tILnAlD_cGV0{uYCd-nV(Q#8?CUYTAHX+FtwpLodro=JI02@#L|x50QSihHnf#SYLlXp0CEn#}D0pi7~wien1rRdw+Ek6YFNa`hTyFgcADQ+#GoB zN%&PtL4WgTy2HMemEUC^iFjF_M!c2)1#;V_R*jXL`x?m4!CyQ*TL7FrN%k}u@#Nd3%qhD6my7FbudAKQl>2e6==he?ma?{zQNFw~ zvdUitJ6?`fV=0PRz5zE&Lcznk4^K~Anv$xjb*(ooV)63Dm{ofI-(~CdS_&kKBMvc@ z)#%@j)LG*!e!I$763&V^M*`$|bF;;CQIx~@`x=*s>AiF1ui@tfMziC9;M{v5W(G-m z)qw&bPal*~(uJ_HD5HbP!QIW=7!o0GX^i)7}m6}qjgYHBv zZuSZj!fTL_kbn$?w!CTCX;P=f=;4tj5@?w%7$_!(wGoOs|9C+TObTfM#Rq_&4G+J< zq_#g>h58=0vyjQwdoY;L1E{m_Ccjowfs$dq+OWT;hmxAw%)|ui|4dXAxI##=CBtM^ zOG^uI@CmrK=*P#$z#+lc=X=cyA1!I;cRr?n$Lw$U^fGhb1W~yti#OS2VICeQfQ`{3 zw{)$F(v{83$}TKutg5>6p+HV{S<_1&(=l>y1jk+EN=a8%2sX2S zo@e*;uu#KXj&4^uQ?I{wclQ8yHz+!{9cb8c=H=z(+jDMuO`R0^@Y(g%^CNE`nRULh zBCq*;?`JD3E8sW}51aFu@@>xB<#~B|?d$zcoSqyUaNzCTZVQRd_}x{>8mTx&(A*^zaF^H^1fuw z-Ca|i6oor){P+l*UH|y=!}_!T@9lM-Hf<9N(>%AuZ}04!{QvWTr$w?dVG}%9j3hU2 zb31u5;q13R-QCW@>X$rKnp#?p0DBBS9c}|%I@79DYpU1bhXql)*dC`_pSfL@b@}J= z<@2V-R-Cr~YVo}x{07(Y#m?>#r;H^e+E%V!4IK3@+<9kyhWtjqnkS!u=IH5F3kzR< z`s9O^)t$J#KiJqZE-0AfNHlHnTzrMS>MPgY%H~Ip3gY(#OrEr3=~A8A+S>H==fFAT ze@m=OzrC3$tj@Q5*|QU;AD5Qwu(CRFfMMp01&YoG7-y9=tk4h|EXGZ94J$WR5Lw2DX|{96qe9r?(EcLuA#euFwB@807k=PnmM%;zdd0v@@U{%EE;o zFTa(#_R{|)a9FOos>;o6-^mjm=g#Y|S&?wDt=fy?ohN>s6?mo0p@bXR0c<%mU92mpw~Vd_6rQofkiUaIpFMn#j-h z3m7)~n177WxmN#JPw!Z}`3>Mgl{%pWHG3dU!rGMw;t%U3wrwqMp1-LDsFT6d)z4*} HQ$iB}M<+{T literal 0 HcmV?d00001 diff --git a/images/Hadoop-Architecture.png b/images/Hadoop-Architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..8725bd330bbb2501595643981b32256636aa1e12 GIT binary patch literal 43815 zcmXt91ymGYxF0ZB5b5sjZfTa15Tv`iyIYs;?oJT_=>`eul192qy7SHdz4L~{vh2c{ zoqOkg->+`?X9el^NCZd#0KAv^D4`4haB<-CV+cI>nD*s&4)_DkMOhjORE`ntgTK5r zmXnqMUjF;aYcEX%UqN*IsO16x?=b)Sh66IP@W2-lTxC8!|DPh%fbxFBO>74^X8c&5F;X@nQ?gB$qVL#!i_2DDE=(B zUv{(nZT09N#l6;}NBGU4K`sic^FiTBV&Y}c;yxDT)(4{nqCp%*DXb;=VkiMP6f(r`_F?lpXwbk5 zbg7WFPL}XgIpj`Mwll0c@3o`lhPPr(FL4N8M8nX?s6M}@FhUE=Xhezae+2-LA$oig z3;PT7F{Y18bvIJ&mgVcHBZ);Vy-j+*N>9A!2IQTb<+}~OGu^g1^>KHF|6W` z#BPt2UQ#^*e{Q>LeXP5H!AaylGUPTc|H-!Z%eE8aLg2AaLJv5IE+07&!08Uh6oj6 zWNTXigGm?~=5=<8q{}}q3r$Q;YPET?hxY9MDUK{CDDae#fPaA{vAjq0K z$a=fK_<7C^*tjgWk6`fuAD!lH>EF05n#877o1R_f`25yE$!YeG^2%#}_)w8HxD7|N z&Xg0VCiWvw!|{FgBaNVIbad)|E&KD*e8=}Oi|wfmlZ{zF2oe9Z^O3UcS?~=ISm`2U zwwt|j#Bp^#J35XfM>%-Lgw0IC&@+E`Lb1VopU!+K)F9A?9Ay_^I2DCUlJ1v;6ytka zZMWQVj-;EPoo4=mIj3nio9Vf*)9g%FWD}Nue|t+sLo=4aEvKPzGoFVUBpR2MC0{yK zrQ7VhGlGv1PL=XC><9|vlN}wV3I^HJdX;1SNGh3$%{}{BrtCc}p_BgDs@ae4(KA)& z$p1TU|9T%6_cFh=*-d|gSMm)#gO0UmuD&1*Gs}0?;Pg+)Q#UVg3g z(!IYYEi1;n`Kh>>w;$Ou!xb8#o%rsb0W^OIL^MH`1Ibi|CY!OCAui<~yvxvsns2HL zF-UjtWW`e^c`fka8X0E?I@%UO%XM3xR|>y(+FQd@kH*)&IjnYILrI6zDLI(|A70X zaf$L44b8b~vHLuvQQ$1Rnj&5jLwy$Q{Ks*PCjZNK#uze~udoOw=f(BEzkeGY6hD3X zw^;wZIxQfxvc&-R5GjlLn zL}M)^g6!IE=DEj70@5Z)6LWhQTJSoALb!_MhO{3*hku>$;Dl&xSjmqgSJXB@F~byj zpl=C~V%+uW;2O}L8L z4D_Yw0cYP=LOeLrl+Ratk)AV6{bhWiC3{)B{eFLM_>=v{jcOkb3EWcePWGppYisr| zZT^-hYC0VZUXKh?#pSjF!)J^P23@Ui$cIjk~wal7q`56b0mLI}Yg z&1&RhVYlixPd!a@K+WmU+CI3Z-tx1W7IJ(4?}5~$@B4zM=iT{s@H<=%4i4+Ar2Cv^ 
z-OAL|)WSlOfADs|b78fx|6^HU;lUzB@tHD+^Lzm2lJJ z5v2?sTVyV$_WB+Y=q(>%rFOe_twUQhi|Rk&h%<9i*jA-=O1hTAteu`nL8z}tX+N5A zGHS~8u}E#|PbU=b4!hk3f1#F71f1UEKLyabcXu0FqK{-YrcmuOzl%J0t|1~w$~#X^ z@&Z6F%lL?xN9A#n#K^|EtdxurqIjH=yo=+~?Z(~jx~(Q7&beb}J(~isD@n&@hn+9K zuQ%6iyRz5Zt=z{hx@+}AB@skVjN5w=z8;D98|JZZ!<)f+b$NM^G&Ai_lQ{SemqpP1XnuP*)rrU@?XvE6d7% z)HOA^?G^D~UsL#+?6SHOn9w`7>YPqsu5Oj|-8be=G|lXTxwikb91z&{BstU8`u5#2 zwL=zXaXD$oWp0ypV=VJnNi>mhVh&#Xs~B zol>6598dP71BZ?^@)a4k#|o5sGk(WfUOnYy%SkS?dHjV`LIb?@Z_dvbn_Wtaingkd zthctee^gYIDCX)@+f^WtBmGM|_>I@QT5<9Y^#RWNU`3j|K3+cg8#_juze{7>5SqV+ zo}Um$%*#O#8vI;6%PNOeG(mQp<>TePiVfKv9oA4JE^BX8SAbw{;JEnSTAur%U*Yf& zqo=L56M1oUl)emOVM>Z>jb~j=0SAAkwr-q(X7?^zglt(;pw?$mGu1zq*xP;Et}wK2 z^l0sFQy0f98D>UC29|uaq{;ncz8Ftkcer2*L#5%Np?7jaUQFKl`ucAYBhfIbErWxL zA>>)(^aWeHN=RPtZEZztA|jnpzYYIoX9T74C~Wrvb<&hA^Zdxuf1kwmZ()ZYMrmm4*RuF4r|0Km-+e?vN$;g^8& zXJzsgI{M)-|4%7#%5xI{KqsfsKKUo1N*iMJhTAF-4L+@?JcQ<~CObmAwOl}1UFx~h zMnzkeK$H#+fTx1wwPg{((BjMx#f4=M!^?L}P2mDD64dUc$QrnRg#MjxbjVFkR%^6h z&ZsphE-LcAI-pkVh&(m|u@>G%v|ea(pSsX{k_RGPK9`0TqpH0<&y)Ttx2^Re{0qj! zmG5I6UN+qLP)LNZPWI??U622EFM04k?;+zQleufw2A%KUnO`V~2r(VC-hW2(_$`gH*a(0v)o(Dw$u*2(qqQmMaE^hnN~sHW|CfvwpF zRlE?thwEsm!uo{gfq{W2Vu5Ri_0JIUxH$H`1s&7&Ujd6E zct!9S!yHNYVl-lj*ZHVOzGpt6?xSgo!vGirp3eInl3U`8ftZqCAs+8cqxJLiv%S6j zC{{H9l*p$Sl$Fud#E2kuQYDwNX#4vO?+qyOcGqftowme?Ge}4-V)>fgvj0PFBaOlB z)uA5%1X6utV(mBS+mem-bv8j&3J|i^1)WSf)pvL!C?_{q%l@W%AfnVI~wWZ(qktoZ}er{ilebgrKPFTo) zVB_J<%W8I~vKlmyY-Ig1EGj8^mfQizVaf_{fV~7RC0y$0_QkCrN@gGK8W9Ta`t7dt zZV`D`lE8A4uR=l;y*n{pe9teZtnuBSnoKB)U7OBYOgM#H`iixvxx1W8{=%8#7C#q! z;bJz!#&^$}Y;i*Sw;f-@XAPVGoF^jcXQW5;E-#0W<3#ptV@Aoqz3sXuL=PL^MnsG3 z+d_Qp%M#kR195~49sT75+oU_6jR|%7@V6p4W;L=0Jm+S$rtOzxG|r&t_1EZ7M)e9y z^f#nRU&F&?{OZ|-nVsA2-;-lq@0=fh74E*V=FmhzAXl|o%IEA}rI+UmlkJEWIqEkH zR!pxCntwk@5}D;7Nd}|2lg=lQ zqd&~(GyOyGo!U0|KK{kUygK7L2ngI)@r@zM_1m3DUK2|BgVT!Bd}J`KV)gV*fKub= z61J11S)0jB`0_#Gu zMv6#=IQ|T+7}-L8CDvB9W>uJB+2P9L6Q}*+ub+TQY11F~Bv+bGF<`kV=a44G$Fnyq zgH+>O@axxWqnOc(*0GcF68pNLhxLz~wP}KHF&xKoMLKRqS`}Kf z1#qK;@Z(zTVdYU#b>qXP>|$3_%P8c69>+_?a;Ztl$vP4SiqnP`7Sg3tHq+`eEL91= z)?Weqgt4-6Pe)FB6Heoi@^9Zs-xj~qZ=Dl}brk~ml3fYjt@-k2Qj=R1eAjiJ@O$Ya zjKxCuG;rGKyM7ej)T{cMebk+Q#Uj>Eegp5LGKv?Gu=KA#lQ^ zfS9Dab4kioA+vbTN|yAiSz|tqjOKT`b@fE`4?RCFSZm!%Mvs;6?(E4Eqn#Mvklx6B zBTSS@27qB|@vZ}0bw2_4!}pXVI^GO!Ubjq!_oCaM60erVN=OOIQc`mr{`|BVXLs3* zFw@#@`|x!dwPPSTHt%YGMkr6z-QPq4>m{Hf_jLTtJDibRk=5V7r3?%LmYSTH=ZB7$ zTQ@g0yswY6A}wAk@6Za$C&P*Uo2ei7JQ^J;{y4-iG&C58mL>8xN5y)kCyx_t0<%&F zFk3ie^Z~+%&-v0*;w4k;O6Rn83)IJ*NvvGAUVlQC&u*S(qEwNRX`D(^p&zu;C7PO1 z!)z~-FvY|`L`3W*gjTGbQz$Eoms6W+inMr!MX*>K+r&4&h?$eG6pj{D1KEhFK5v;x zEUqU{f)e=ogqvJ^(w!@1bJ>6M@*n6x0sb2jcjL~EhLkZVqxf65Kd|8M6?UO;KuWIH z+3i{3n2Fw^*^|fmkS_YG)1m!BT8b{v18wwiZR2KN0Z3O>%^~tmz znOjDe0k+KJ;9KDr4^`&O;JvFp5dQ0B0|ynY>0iM7+j>2jME&hbV0`h&smp#LBw`{| z$W#?;cv_n7%63kR7~q?=$L8b;7)k1R+H;J{x`u66vHH@j?B#u>ggvdEye3beAYou+ z+;_ci_W`lYd0TsXFboyFp+d6ECtnAeEkr(OX+5a^L{&B2AuWJikXK+%i;XQwjSY5L zvO&WAfmaZ>MM@yQsN#!KY|d+xiKC(_dWp9+NK!&k;UO*f?o~Jqv%NSF0Kz6tS;(*j zN(-OcBZKIaZ7lhq5tmb?isB*2cZfUX+~M3rLvdw3I`uvSte?EZ#Ka^ddehiVv<|erQPF5g_E{5Ei|`(}w};LGx+vDmy}Yg<%EWQMDH1 zC?tZnAP)ndsjI1BXJ>s=DP8Fd=zODOYKQdQ2nQPqL5mUn-fHa6(!(GhGd*?uo=mXW zZEG-eotp=2^E<+v2eZQi#tilMQE1*JLiU6hwR_din2=t$>I|v_QHYkJ*7Wzn$AIH; z?yAppFl2Z^hJ?-qW>s0|Wry;xENB&%Uf`gD z!uLMv9}HdQ9Z*^J%p0#>q6aVvj&5&nr>9kEX-j+mJ0!kLV^DA?t^5O7TVG!v^93aK0O;+C$j{ll(CA4KB)%?!AJh%3jhssE1RCbWDmyP3i=k8cs`}s*NA!Qgv z>5Cl~Zy!h8yN=)#P8K%thMel7>H37OA7UTcZ|(nuczUN zvsh^~@q^TK6t%T3ySJ#f@jo>5>~0Uc2K@B#RO%LBWX!z0bx5e#`uROa@8IFlr+GFA zX5=%nO|~}0fTxQ=2oM2X{b$k|tV<(5R8w}X-Pyyw$N3z)kx`y+I;D;xw2lxq?Tz-> 
z9Oic3XBdnfj=c-WSkBi8XdMx-akbn1;ifmX#;Kx`*xBxXyMATk^{J-|)}1<3W#5T> z5y`yFr*w0;i`Q|m^}?uy9rU$Qw>cpv$KKj{vC&}_T)i3#ipH9DB`r)evDpCPv zK%{P+3jA=Cb=vw%F(GHkF(8Rdrdz-L7Hwthw=Tbmbj0+`@1JDPhhg|*1_nz^BaECU zE8(HMDIe@Om@hoE6qIr@hPOHJqJEn6k7WzYEid~m)>*wSRzanFStW!`^hM%+Q?*X) zhiWp5K|1}Pv_F5IFrPooRnaWZ_96K@g-OokzWARuZ8Jh!1qbMAw^$c(t?Gb%PnwU zF+^76_Zx4zyN09@ABkIcVjCOMS~rIlb=Yspc-1{6iJcdluFt81QLKOaA)I@)X*?UG zj0)m;G5fhhIT`3|JDvSgUG%8XPD(7DS*_(CRJLnnl!?hI`ZhB)XyuMd>8Ff94Ft}V zA$pGh``o}*z$c4mij#=RuC*2nDRXQ*aDVAmTr54v+O%h8gQM} z0?a(r?yyB6K$p|wa;t~Gzds}&msNk7<#fv1Q*h<^=|MCvt^S{Un$M)Vc%jsvuEs@Gy%~BW-Hxz!-Sw~E=FAK7m`$6xN@6}oLsivjP-+ticqSqu%8H9K zG&FR4d4B4&EF~c(UjCueczSxew6tU%a|13ogAQLS?f1R3JUl$E{O!z=ecc(p$=K%9 zUU!l-!V66A*(13do2H|vdjwq5o!|DbInBr6v@1Kl{d#7=g$j!cq~-5y674enna0FJ zs&X`0Ncr`b1Vjwu+xHwE#V!QM?&&QPO(JdA2O^%tNO8y;L6pQn)0$>r4f*ZY(f;tr z@Au|79Nh3Ykv+R-uPN4LbRtl!3BIc6>mQ$-R239#{QLKBXXko*7#9;0^WA?(q}bQD z`1ttL)YSC!^v$iU4$Cd|O-*S%>kyF_za>&2ucjv9m*GgZ052yeCpC57$cVh0-1osv zYKF$TIwpF0a=P%}wJj1qBO)R|Jrd#d>w&?+ARa8n^rfwOig@a6>@80594+}ATx=pc zyRLd--kl}p06;xFUOp0Dgpsj-qKq)m6a$tXUL_)}Y%!kk%dlq4v>5%1*IDjd6Y&;g zt(;k><1WW9SF^yz_+&MxGq--G+NoP)XjMMJ{%~}5>j{l3 zLBmAW&9#z*laHE7?z_Km0C1*BL`BaWo~Fv6(IWJr+drkM*2IH8(Tb9bYAC)S0eK(Q zMpj~>4Ah2obaWW$>624ZBA&H34Q&294naM&t4oxqUyEtB&T7ivpb7v$3JogE;o;%D z_DhyxP6$xT$J?{<93dA+M?R+wSVF=dMT%pMo1vK*xAUz*@lcdEcB~aXm|9R6G$*@W z%O3|(6WNzl$FTj94g2R0oiq^#r~l0A1GO zKCsV^tvW-?c9LQqSX%JMX5g9$`*4yv;c&>Ih*@v3&C z0u2AeyENil!2x`_7t#>vqokFP#W*T2n;u9XdT7VWJoZ{=Ce`xIhF5CdO@}v)$?Ol^ zcxjU-FI;ozKwM@fAt@>A&CAmcg);@H^O zXJ=;!C`6WI5*%k;=C!2?*%{y%&}>rjex-h$7dzS(=)#Cb|p-pMLFs{kzFZhZVoP z#qij!H`C$+Fo(KJN1JBj71u zN_C4l`rjujKe(ai_FjNZ`snBg`TcvHB5~untc*a;6+cM*LH?r{;^RDN)e^1#zwCz@spOos60dRK2~GHxf$kN=HB&} zG(F~k29_o1$KG6i_f4e745EVwmNXO;`*?oEEIzT)DZ!&QS(N= z!#N6(~wWmZv& z+Kmygb3mi*b$>}8Vzl`?+D3RV5KX(*d~_h55}ey7x%%y$9dlDtP^zj~ILgZ-Q@Srs z{>=WPoU@aVja}AKu9pMmxYiYPcXtQ1y*i0`1AJCih8liP;h(88=p^AZm-h=dwH_|N zyt;BCI+^|Pw?Hy7j4n?!EAg@FOIr8+Zah}m5G0y2eMHFT8dN0sXlbPl3|?}DHPE_E z`eV?w+;q^S*ZBOwwp36+F$o|R?B$?gIPqnHe@@51P*PL`pueJHFUO!{V&}Rs#O$YM zVq#)ttpfj78Kl#pC{J_P^|~w?Iy$XpZp09BdaN@y$F_m-Q-J_RZ~*BdzE%J^IfB(qa#|{8ho_2w%*ygocu*81o~ z94ss@!>+(p)#l=2Gfhpr5To_=b#HI)kdP23Cnw{e7CfVRDr91Q*R6s0fDS#HL~Joy zP|^CoEP$4#<~->8iA>Cb6qX?T2O9L|WOZ(8sViQIUkU zw)g4UuWr27P|&vFF#VYM!KBlHtibpic-|NS?t`<+lBv3#xn}fe?!L1 z{BM7%l!1W(lSUrxt)7$Xe+QPL0_Jg`MwR};-@k_k2in@&NUy&XO2-}?9DsumrjPLw zF?6+h@B_TyZI?H|+Qpp`IDmI|$H&J7g2Q035)>5VcRQF);W<1Z{1`1itd@Yzv9Pq% zrc$bKI9D}3HWm{doy}#HkeaID<@F(1w$G_8dE%kq6DM9&O$|pm?tad9>uyW2&rO8gdl{BMnw9^Jy=*+pjGo1nNmC|=9KZ-kIBf6Iid`C`1gMY z4L|>Xx16)HGdMR)I5xkd@bB;M*I4%}B<+}FWo5^5glx>sf1B6JP$iuDsj4Og2TMvx zy^WU2WH$)`%go=z-f(nv8w7}vmX_AT^*>x@Z7LA`WNZRGY;JB>-QCcM5TeU)CEX^+ z^_%zB{V~K6Pw!m;a#OG*85_d`)pd1SXPzJ=07qlTkq?9b+;IcB={N<=;95(|%oGq1 zU}RvxKr9Fbl}Q9(&5_TRM}U{tW3|H%1Pyk`7U5dmxf3g3Q*h;fVpV za3K^H6hwhJ0v5dtuwi3hP!K#{^0FJlsO|jyJL2OfV~7Q$q6vyCD&#FgZN4TYskXZR z19iIv@OI_27z-=VU=N=8Et6SqGnd2XWPH5T3@)GVPMJ^k8>G;W66!ze>cECr6;zG} z1wShI9_3yI&)Tj$^4-p8W~pK>3L+vLfEJc#78-(9p!_Xb@hekA8!SoVWThRf>~KJS zetvUvGdRNF6@z^=lgAE&I#`?SGY9MxT;WJaNCo>Egq1|(Tpq`|Kv#>~ArUcgbW~I? z)B5V_wwv;~n=o%j_W=_!8gTk}2PSlap}(9SW67C}t5m*CgQyc!Yo_Njpsw!QL&>0; z^gOi_QxHG-^~)C!tV1kJ%*?il1>BpOnjo*?F~X;7Rz86Bnwy*3&N;il;5{B2xbER3 zw%m|{Ib8Q9=v7Kw_y3q}^oH}rM7BJ0HGkL;2krHmni^)E2DBR$6LsDBJ&^O=-<*(Q zVMz>NY8%p^!v~Bj{4+2^G!a=6I9hj_zYt7}3=NTdx65*Qms~X{ztK!Vdq01-+bx6h ztV4U6cBxS%9G24MB>3UO0w{khFFz@I?*hQ%)04-)MKe1)Jrf&@Rw^bYTkz(5-Ed!u z$XIY;Wo5NHmQBcE3JLnbV$h+huD-Le;e2zf_a)AXozE z%+mo8m7997_0!+744YI3!^-F6utLx)vR{^D*x$$V}BBnY*R*{GE=pSi9ozvw!k^d-NOaM(*VR|}#{CVm?!{=6|jZdPYqH^0Rx2@~! 
zkr5GK%bK2<3IDMNGMJ|ak4a+*&-4@mCZow`kPfc?%&TXV|McnUd|OUgIUzlL;qmSw z6Q?w3c-z~{>&&yw+?`KQ5VUTmMzcC+Vq%D3zP^g4IG+i_3?GK4A~y@Z$vXuB4rf$Y z0XNofLA#=;82eAt`9Y1Ql32&DqGftsj|V@QuW?DqIkRGwLQegLfvlo?D`t&fJxsbc z6Z00v(m(-K=It+yB_Dgi{#zpCK{`rqPCf=C%I|oYD0maK7nKhl+&?qy?*qszd4W8z z^}Iqr!ICAi=91N|25Z@Ji<^x+{JQ!;H>NmPHHwRizapTjtEfDHS7c^Z46g0W(Un#Y zhg@Mli9sAN$*Q6jGFUGNuH{(adXei&a$wuRPK6!7Mr6cG72gJ@DuM+mWB}M+GSgS|149A z`6#1lNrxgje(Z;3Rtx~TJYS{0W354OPxtDOa6;daMWJ*YZnYGD`TZJj{J=yI%~F=D zy-|a&3s=DIR56mul%H2RqrWSpIJ44sm_PABxBm8Q z^M%>`vuX^;eQWLhHh{$o9w;g<2A5sE>%KCm4t!?JdI$ZeufH-oJ3BmV10uoEK~XW? zF$Dz$E`n$@?H6_@s!0Tl@GTIb9~}{abAX0Mh6VwP7{B`wxbS2(A7^QI!N#-z&gBI= zlarz&UzT^tPt%FDpM{}UKi&AIgAPs(NyRskP||DVsQp`|BjUx(oPTEZ75hbop<*Ub zsMX1Tcth$z+Lk}9KkK~6Ak`G0s*hNoweQ^}bn-W|X>W-$HCNyRGN0>hRuT;MMM|-B zyOxR^TL0GRCQKY?jD0QrRPg{6cHjX9qL?z@1d_bAi=ehe{-vdeFZQNBYPjEKvf53h zAs|F#XTv0V0f3N@(1^d2Uz%#`wZ&*gGI)z^ z1z~2b<%xam@PE2*Jm;RaZ>*?@>N9Q+c+rPKyXWQ_J6O=&yP5y2gP0Zv7kBQ$B<729 zzbsq~nzr#u&x@R8*LT1t6gw7G>(T{90y0Wi=F3accwjOF-JAGXpRT$hrSh*)GHkF6 zR5ak-&9r6z^erEzWZh@rZ+E)w`sV=4^DS$)&40y6S1XsQ$%+G|qH;&gYZq+At{h|d z55|SjS=Y`SVZws*#PdIE_ozh~Y1%1<~U2qW*X) z?fk=@;&^<5n4Fv(B(N~z!npIQMEo}YCw`;;)A31luyWeT^uvzkdDGz-`=K<(zw{At!^Ow0``rhB+#eCP94q$ z+ti<8IiIV8U*S*ABP9PaAwU3R>Et<#FS*= zmKU=0Q4RubJb>-9A}uE~p|Kjh(tzf6qwNZyMn=g8O%0#SKD~dkP2EsX>8TG>A|CQTUkR4DRIAei06nY6Ul&{lB1G~W^vLn z!Q?*^aF1|C=;8U#$2%y<$dAlClrPkNR?P6kkae)UzC%LtzT7*T>Nr_yo&qI&ZS8x5 zU+UfL_8_bQJ1A_+5(KzlrvRIL0o^zD^2*y=&md7YyTv*J{;6F5%#@S{ZHT}EtRK z&6XQ=CM!94A^|!kP9nSS$V6aZV?qKO14mjPCIEAsrlg<|ew@Oj)O%B)2AA8j{R%Mr zRru;*%yaenx-I4XTbpNF6Lc;MU0q%9<6-WNrd)Y=m9MG39ugD%TWSI{dE zAQw>n1I0hTB?%ds!OlKlgYS7~lraB{AYC43`!c{S7ID`O)PX<|5tqZ;ZO4ulX?J_O zxuwP1$EPtb=S;CCy0-^};o8-R-PRQ>bgLZ?WcdZ+w&k(A$Mrhy)Yy&&N5?`p)XRPk zvqwlI(8sC%7$=Dvqm;OK~JL z!JG^Trpqr%8-l7Kp3je7j*h44oEFw?RYBEZChD4+A3ru}zkH)?kj_FG*#aJ*xdYqe`%Zuc1sf7L2;iFw%`O(-Wjz+_e%;^Q z)f?J@(p%LoPZ^=2;NM9XnbTP3DJ!W^UlxcJ*Rx83Io7DV)l`=@?)G|`zO@_R8qB{hRcRD-fgBv5>L|9-i_ z?k`xq{1=g6Y;<&c^FK{qV0#e*Jm$v6(6VujAKLYxegUeS69tIGBcr3M?LN|`CY5Dn zV44&@D9{qV|7@k*=kV~*#?lgPkgrSR@U5up!$w~uCar?)^8qHD$bVKqUR{wnrntHw zn^GA+7PkaLH94`ckusI`Yo3OM+~`Cm271i~+Xaw2P*70tIIKLX3^ta1gJdhPva&jy z+hP#f_eEfXBvr(KFpMnl*+S^pe78cUtxwY+an#(|>!ZGccGo6b^JjGe%0#NbK$WpA zFfeAS(J|Wekj8(s^;(Z7mv$|_h`pBZQV`J>U}L;v*UdJEfK(M^#JN0n8ChAn zQ*1ujvpV&WUr3o74tUOwOUr{9KWXVwL=SPB zNwpbK&2D`ZBEHj9nzHKZiLtS`0hR2cYxAoa zGTevsSCpkV>>6*1kvf(id^c6HDk%bfF1#tMD8?*NK+$ZJrr`Xknj3pwpx-;W6pHc* zIWk1kwlu~%7d6WJVQ9%a#5``??5?9fNecrPMVD~D`gyONb)VC0Y9EiBIu z?9Iv9$=LpG5_ln3|Nh=Q+?+(?Q!1@Z7E1s8`4h}VJJ`+3V#TiUu~1TqJ6ZsNU=lMH zj~yx|mTx7<4U3Z=H5ss!b&_SE<}VvEp4r6-MSI9sfOruRBr=jK7NoBKQ`;VN8+`TL z%0RFR2Nx-2_6nO?5}x#Vd60Ie8C1BKnSX%Z%;>jL&>IGa1hMg>&8rGYcSkvVaju~* zq`-;TiG_TX{>o)HZ)75Y>Z#t2qKm#BB$ntulYh9l2s5S+(wY7oi zFo}Q28S4ME=J2O>SX-Mux}2n*!!&5eLtHjHJDfFlwraf1)*3YxQ~j5ZW>INn8J7b z9REEQD_mqXlEW#I84Hn3$B@HvvR$(~4Of1vU1&7xiH51fC4cZ(WTJ_jBG`HSPe9xm zvHJ1{v;Pdl>tK?c$IDHd@X-en85q^-bJK1VW}LdFNdKGQ9A%dh9SWC2yJD*TGBgP3 ztW9E?Z2fI)fgq%ERNEB7qW1Z(GNfM;yIRC;37w*(sfo`xk=s8M9b&WI9SowxAF`yZ zM}y!AgUiansRX%10d4*uv3a}eKjYmPG=fTbVS+wSpAq{8(!;&{L`3>vH8=kpfqPvJ z=-#CJS8AOTau`Nu7Qf>@_S1~S6geR}RNx1;!*<@YH!%h{7H<+c#L8^DbZKE|%$FHG zRspcj6W@w$I@u4nmZ2jRBz{t4Fctw+_eD2+a;h@AEIW~lDoQ3JT1*U#*(!ta-HQOX z!HOS0=&^=6jiatrEHTLw$VDM_C*7PJq;bz1^DwX4al1wLd+|SfHj(aV`d;b<5nH!{ zOV&quZBY_(c2*w}2ge(sgQS_<2v;H`{>TfSX{R+GBPq}mzUGCvnx$b zg0f&d6?4uF_Y)5sgBXsk2Ug%LEv^?e5+HOY*N@zuwn|LQ@{W zz785a=clJ=_N6>|B6gUNcW@A~pc^D&?B?-bxfC^yERnP_A)g@w7T=ejNt_W z6-rxMS_lXT{!2%t7U=;1QGcu3A&>1mo0itXtTV3YJV>p`#X~Ce+xbOAUWSqxLBOq; 
zE3DV%>2z_H^cluww@{Y3%kl~+St(|aF#+JiJ~#T$@>c0q*G_tFJ!nE@5|y-rt09|r z4AT-R182)f0AJ_XZJC!|olM)U?PvUJJaKq{DqR&0v>aU^MKgkN*9ml~2p7 ztQ2-X(p=AaHySIETG-Sy|L4!2?r!6&gBgi9?g{fXQZvL_5|VjR(aq{KCY(qz7b|LP z?CFgmS*bTa64X^i&76nJu)6NHGQxeO!tz6oJAs?%MJ~pW0;z5 zQ}jy1d#l4?|4jVE6k?iMo6)s~Hlmb$%pLu>G%!#$n8Wyf;xQUkiA?*eC>b87Ir2}L zt*x#9^e>9pgDZS#5upa6@@qube{DC6|CR^8UF6iPtOys;G0xPl!j@!VXf?BmHLgu- zo#VHfOi&1QXqK?kwp-sY;*-bH{wf3`KOtHpsMOU8Ehqcm4TcZ3}xIz z?ETGf#u(x+Ipb2WyE9FOd%{$VD%#rgTe3Rzf_+8+u#%IgKF;e|+qnLt;Q9cI_&R9H zkuD!3y0*7h_Txvp@7+1*yRV1XzOpsci8DI;P|pJHCvU-+Z6LYfXUQg1jYg~esi~+XDyT-iO*x0%{*TmgWM_q@lBkrha zORhz*Z3m}pfSQMqkx}O-2(*sTGE20C{O0{*zxHc*Ve2l`VsnI3Qr1|~1{E8Di-M=S z+cOhKZV3qquJx4r#=yr|;E`Kngv$Hcho7gA`#LQr%pPG?T&hQTn0?o@e00UYftYh8X zj->uqTSE>}?B6*WG)%Of5vQ9CqfD#Z8s0xEl(hVdLIl|zwe6W;(qyWTzyUv*`$98<*P>n{kS~Ut7!A)WnWo3Nk zBX}sGQ(*50O{uAUSFlxBS?#a-00gRLT?|&q)mX3JSD<6*>$+7^RcM9p*+AA=9fCTe z8mw8R|Mq<54qPHiQu-SYUl)VbE5Z*Q5@4zQnzmEqG#hnXjy;%5*QCuNSz94s;3wo{skym9~d|n<;MA_InEI&*~X%a|i%k zTj3dK+L#uOe@<1WsfZ{g)Ij+X^cncQFX`gpDh3rSu#ozCd$+23yQuhWO-#V;b2@-1 zC~1#p3rNq4M|v|wiXq#<8C@ODjraGLmX(EtQHx4YS})Y4e%e;U&S}1eU+dl=NP0as z|CDFAJ6$Q_NT6o0?hBVdmPk+{0mE7t#vM1?!V(e^vT@|`C5e?tvFuWK2MlaFlgWIM z?*@SFZEUbqn*5Hk;*(c+m!R6cZfb50CQbu-Lf!)aN4lWffhy=cy0~04thJMeG`!Pv zLFsDqx&XDkk5tN`D8$z%t0ER-L>%dlJ`%N^z4Msults1_-Ht|61vn$NJzj>3i59Z6GrwrEtuqyEvTfJ4JCnl4C5)K!IhjUL4i)c z3t9D>DDa>BB&rLh4M0B^E4eSO{7VFSN!R={HD;8Q{M_u%0@K}+A~U9pdeOk$OLoFK z)8CNPo?Qj+EYRrpK7X7FS26#a`EgY6R8w8+~FSn^}G zmFyuV!@R#gDBnx6Z|7;mXP}$@udgkwf-=P~Y(kA#mzDe#g?c{=(87l1BQ5}F=DX# z3mOzuRFQkYn6s^|EeO~sC@B%6pFv#&OhZsoP=MF(>gM)B;Q$75H@CLxz?&Hq7RX>^ zqD(CLLNK*SX)2jlAyEt|KJQ32FIuIgF+j3 z2e>&&ZHj4By-NR+slW8&P@+R*!(Xk;>mz@v2ypwTpQl;y@PSd{PAYran_w=fH}%y` zWkq!`g~H?%9E}Nvckk%>RB`HynwrY0lYSSeA^lUrPom%l?by4ef-mq$!QI%7@92}- z+60ATQgN`ic2zWq?2X`x^U5q!|NglVGeAO;n@_H*ImfD95o*ivkBs<4$HcJ1@$F4t z6v5ayv%OK1;V*a!6?(vMB2P@7rZ&A}$QZ3>^~>*}OxVpSDDx#GB!H@cx`8E^(t2fN zq#T&2z-7`Doj-p(Fw4ov03+>@X#Yh*Ksp4%QbK$LwK`7F*vJ57Qm`Z694)9+akZ<{ zd656=*>>J{ZHb6LQaXkeF6Ubqg!sIDQ23{=R&n1+T1J)6Y|Nb*T;6Mr_SG=sY0C$d zWCm^b6(m^fSsT^>`jMc?EAi0{2>kYK|KHz6PzCCvO2&I;r2{K5or8lzeyS8@l?O{| ztLY%dbdjeFh~2UMZ$Zapqxnp%@p#2I|l~WbH_zIPuDA(QE+?yv^d{Swi%eT82M@ z_v7M4`ACPE5C|Lq%4l;F6Q5O7UUf3F$EK#vfxAM#sWJef($f8Nb8~X1PF3yf5R zRUfQ4yjcvygs@AEfRihgHzcb=dh+6)fSc)b(l=r}(oV?lEjOlp9_7%$v1 zBs+){)cim@MZ~JOI6t2qObT=!8$eyjzzx%&8%4-#Z~N^z%-R-YC7@LY=EVjK*#H0x znRJ3dk20k~33@^HuzY4pruF6)j}v_`PY0skSt5@Q8wOym3X~t8*|WgLGD8r+(%=Aa5V)+T z%XWNH$YEe|i96u=5fmm~;Hdt{4j{ohfg4*u9s)yq;jZObe{%|o`(QTG%Gw%CMxB6%C}7D>3MT>IJNWn{uX?nh};Xz>7Dv$NM=2om|NxVn17C@F*S zpAO$U4$BELJ-y|fk^H?zFboFH!{G2RW%p!LYbzKPz=SjHMb5)}CZ%Wkf_4jTECo{& zVixsDFij|wLPR7Nn*S(b$hbicMsH+fzDe$YzTFk*VS@1kF!6yG@}8I6_#7eBZzC zxYnw*w6JU}yJg$9v9Ppk+qP{jV++ez)|PGaIX}Pe>$(5gE9~yN?(4*HymbPJnHdQm zLS8{Z0oZ988dxBI0SFHU)GMg#-kJKN{^a*zdzn^zXoup|#y0%mN(TH7u@wM&_hw$h@I|XHOI=-E|9ph&D56?O zqE*-X@y&1~`E&ORFD%J#cr8jV6k@R50GG9Ox;IQ~Ty z;>#h##ANfl`^KFmN09*Rt3Vo(^{eBqG92nplDc1y+S1Yz7y}5z02zb7<8yR$M92fe z7dg3TAiK%yv>a=%K*0g1eQH2}@NugEGEv;Yn?O`)UvtVL#L!4d$gIofMzA)3`YSNt zUrpuvzsfVPn3k$#!`g&4a1hX%4}hf^SI-pzKtQeFD?oMtH7s~+fd8og5|5gi^%{MC z?krn#bC<^(8zAsG9Tx9DQ*YK+P>2O1%&*E@DiR$IRS2vX9p8`BTUuObKUy0$4Q8gL zQE%~i|Jq7&snM089h})jBmCl?ItXVib!2*~4UK>%w7%wsLF9`nlxlBRk*{)+DQqvY zR0v-k1fCg~Ul;0AZXbd)6DO>_?#iXo1xjXs@CRfu$bZxm6*UElEui8O4o0$fcE$%H zHL&V}74G`_dS-eWAY#ULb_`hRVgFLT{{?0wV2}U8%Ztar?K}H-Ym3!>;}>8hURf}( z495(Bwy&&AGoa5OlF9qxxsVtno-%R6?Y8KBkyxNy(w2#s&enDt_goS!E*W{EeYBZ^ z0^vLsRXi}6{kOkFSXRSL4+|bm7&_&PRZNrn)!e~Ud(#+|Uj`#mLt|qm+gCLkhtBVA 
z022Y)6c`v7K)ynJ{r&v`)(Ig3BMP9MU}-HaErsx#jbQ{}TG!_X=ZaMaHflPQ5Fk#(?Mu&t*Fc$G-fxs}o`fwzu^OkvXR|qm-*?ct)&0cW zyCR~U4ue6yunqBj$q_>hp0oE&|Y+hE2~lm9#!y9QqoSAnbqXr~T^rV)%% z0|Q|pg#jte)KtT9jG=K7pal;!oH7i<{v|<%ea^0;DOoYGAlXUw!Jq%Po@iryX66cf zU=|!490IZ7bX0Jf;D6#IKqI#TX$N>PU{WIpV-u&Qr4a}6dES8v9s>s_Ein-TArA=g z!At{X%{S*?p842;8M5;7c0hGB=5j}F1-A6Z*>77uA$=s#Qdy{XTS+6#Z(ei%ma!z) zyWN?#Z1arsi9x^Ie{NvAG9w8@rJaAVme8_xn!M!rtLaPk0Dqaqw zHK&FfXfZ1|RRquwbafI$|B&Bn#0DK1X=`SWLLYDqD%V2KWil9_%bH>RNN zgMmQfkGAmkiKA-|F3Fszb>aT5<*gW}l1l5$xbw$q=`Adn%a8%A06<7rXM6!LnV=Rer^#D9Pb`s(Ux&ao#n8x|H8 ziIA@Ylu^I%Xp-DtU%WsWh}#eJaT^;OfXqA;LGLp-2;~18jEq-VS!snm6zZ<>gkdg- zFgrU7`p4hf4gmoHpc)u` zb&Uo3f#zlkGEK0OPtDDtynnCX=0t56nGhQb`E|ncYOX{4!J?rbj_%3V;> zfUStCMjzw@s8@?1fEu@|qpGA^Ll~Dqzbi64oZIWc348!hxgf#^RP9+natk{D^2;G( z1!|S}q@*TL{D4h(ER(AN?VWkUBKU>!xj;-TtY(MpVMMvAluL)DYK?&W(EK}>HF-z2 zh4>Zdxa#aKegCcJkcGKbZbe9s*K%`FT9RM7Ifduz&K4L)Wx zACJSb6A~IgD_UbIq^<37@TaT@8+LP|xeDvEmi5YyI3k1~27OE><^c^|T~OBU^YQTk z85^sOE-0jW{o%N|xdF&!R(auru1d}N3>3nk7XuV)IF!=s~6kS+nT?OjvM-%Q1P@a}q@JHhpF_i=_OAKZ zD~fTeBSA=z5TJyHfFyo#k?YGB9ck%9egwd&(r9UDbhw_A03jWK;X&uKdF~2Sb>QA9 zCL-qx*$)EZuLL}jol=i9n% zFA3^kT=FI9bh^(1#%4WXC{h&*yd# zSoiIj`Xce--RZi|E3eaDYT%R&PkDxNWdy-Zhrf`I5mPN3bYhRC@9t0 z0f7nxhnREkiDllxLD#9!CxrJqNReR-NyL+Ed-@?jI(S#^VTrL2uSR7eqmY?I;VHyy z@mXlEc^0HZMMddT)K|X*$8HXEQVd|?w8=3>tKh9^$kUHMt2AT_8!cS+vkY8YntJCx z-*nvik&4I2Nyuz|TcD#0zx2jQN2A2~R=J!IR?)2gV!j%^lRQ8ZL@m~IsDI6w@DY!F zp1)SV#&)K;l3=a-{21hR%}D53$p8rhKySpwzrE^aRpow`m6csW$27JEWn*66IcUv+ z*pMW!udnao^3s$Uhtx*Z938VN>n*JR(~~DDX#tqc_m7Vm85uLys#Z(U1ibFg57(^P z&rTsmkw9&MLdXYDQ)OO4l)O@^ke_7&X9rEs>YcXBO;*ek+cf;2 zhw24K&Q$yk*FM*Ho+6zr+Mfk3!Wq#eV0d%7J3lwMiQn+kpjhaiSGrwB`qdLMPA2+dcaNrN zSGTa6?{hc@TCrRvg)oaMg7FW+1Ax2gKPU{+Xf-k3f>J-tD9B zr~LF!*hPssyq0R6mPQ1NaZ|=1&R70E8yElj@7`vMq<9L0WEAISa$nL}s}eOenu%@J zNud3|>8g*GF0bLdC;L%wNnLCq`BZ5#6qLU&PHH&mFwft$-uIoZFPv?sk!ikNj>m*n zEIM6QP>~@+19BB^E-uXO8iahgENvYwt#K*TSX$r%?P-v{Ve3m)E> z!FI4*%bzCX@VHc8RM%LAo|4lc8b>;E&}Dlc&1EEJ!bYB-z-@llKgZk=VbJZ|(-o4l z>al4_$rD!}*Q+z+(GQkP+uFQj(R@F*5|)_6Gg6UVi!t={&0DQx4&IuveBQc0aw^xH z@P31D`TP7Jx-q2Z+uyjW`3OkQkix45>i~AifN1{!<#~V56jKf(c~rD}PG)>^dgds) zbUd1J0sjD^Bk;$rg=z9~@OsaG4IjCUy|5#$WDa2)(4fPbNN1}6l7(>fc`(^Qvyxhi z+8t3+sALJ$pnz=uPq^aPDUd^rfe6Ys7G~zIUF5<$YJh66cc@*;z1qG#9Qp29p^71@ z_yuinV4E7Bl=erdOy9%px({tYEmh=sU-iRo%{t5cR9dUkHSat=#b-)$k-J&^dWNgT z>*w~Hv78p7DZ7SN=UvT)odM%WCW1-SHm4&Vi^H;)7=%eC0(V73EZS;@n- z6)1t3$EoHZX?1M~r$R^#KTbysPHh_Zsw)+l|ZukB?)9K9|c}|5F74>xvN4BH(XY$LF!eYN*6r0)pcwHwCUnt$3^M}K3;>sFIB%8%mkgjpT92yV+gD>nK z*HC^H1O)-E=8qBm4y{k~In53&!zRp2KbaUAYdbDMgRI}_t~xd=VCu?ULWH;BjedG0LOR#vFti8MC^s`gju2noiF2Wj)>E9R9 zh&Jv&lOJA9N;;7}#|XJap7$Q%FRhVAXjj(O)|Qvsq4Bl=1PqFY%bDV~)b{o-8+gbE zmW!3DEU>ZoIM}q)wU%otS;OA9y@*h2EXgo$IcO9;6d5j-dcxi z)p-FX0WeahGdv)Mpps8>G)aA|HTq2Ec5f(m!2npu}(OrXeWH9|g9t z#>PetyEWh_aiiX0 zr+j-ZR>~Exl zNevASNHURoWw?;+7mn7q2(_OuX27ziV*FUs-q48Jpo48Sko zhCr5fC?FK@8tw;^xL8l;3E0M?W)1kCK0!l61I-?ICDZHTqM`!KF9eiwtd`STKu8cy zr-bzQ6v%)nvNWt$Nw~`HXQFpGc-3` zmqaR>D(qv#>7+AjD`>41Ey?KmRf z!uWA8!ek7G%R4v0u6`&O)@vkfb_>3z3uzR3xhgHyc#HrVh#)GN zB;bgg!RSH^(ctCn1XVMzf~ke*h7a2&S65eqJ$Zb1SW2xAEMGr#Y|AuoU(*Oz0J;fy zQL~Bcs=T}pXlVH|&)p#NH*~b?`U}!;#B3i4`uU1?;U!nM@-l&_11i7o`-Nn--%D)M zR}OtfCWaM#(_Gbg$H+;~0JnW_`)!R>aOiR?n3muc)ucwhj)bNN)i_jYv&d|O+5f$Q zzA8=<|6d@W!v0>`EHwt)PjWeY`1trV2-S#$ z7z3v4XAu(+A`gDcOH+d|A+-8obT!oEoG*36Tot7%Wu{*^i+VZ`KD^SmaIlx>skb;= zG&f2Wq6Xdb0yvYEwb5TRyx?&aZKhRY6j|==97lR~W-QvnQqwN1dgOVi?awa(KTkZ? 
zNtvQUJ1_Z)_GYB#4>i}n-s{&I^m5FLd#lUbzM{Qu)BLNUv&-W5`Oiv#D4h&$2rwmk zu{Yt4uE2}L+=H9)7T@EmzR)`&z?Sjc94Xj$yS^wy?)1c?eOjQtMr7x-f24dMu{GWr}8p7i^q;PYQuC;kJIsK#WV)GE0l&( z{b#j}cIT;n=V4Cr%lLca$OJhyTlNAP+&j&_{6T%VLW|{@jiKhG62jo$)E(Y(RZ+_> zWf_e&HTU2451N#Bkv-hcGoz#7`iSCYDYh><8WHedN-d_9S%#og&tHG}WTJ zyZg&s?H1ePjSVGDCE`v*LA*bJo=HwfKnF1u#j<#S-hQ~gB)pBI`)$0dcUwG`QAi3G@cwh1iis6^{`Xh*k!)Fo`-e7-16O7>Ge=Wk|{nc?whszp3JB zWMm%_2|V@}Rwrsw>^e;V1>E(!GBQw=cQq6i8Y*W(eG=U!QeIN>9`kh2&Xr6VW59K; zQWLloANMZ+j#2Azdot{ffsDr<85%k?GE$(+i<;jMa1{S7<5JuKZowfbl44=QR#D_nNMK5bkXCv@lr)QviI@OzDtyie`xb@#F z%#4)(P=N2A#qD%8D&2j3)RS8F3G#LRZ>GlLr}E-W#9nebgdvROb@g+$xa(OhnM zImm)hfNCK22QZ)f3x;lqp|L^TAI-|t)Ge{7gDL)ZtvWx`*`NdlJEBnp0=SZ)6}=Tb z*s5@t%$`IP1*5x)aZx$%Y|6Xbq;joz4WL8~Au@(s?=-{SwNPx2PQyyB%qn|v zWI1d7IsRRj1qmg|t1X?B5XY1mwSE)Ds#@Yo#}>&1IpI7f%66@FovkuqOM5}R-5JlM zCFEcEqN^}8Gc#LFQ@{gZwOv_QUG4Pu>x+4((QLARAsgbj>Ca)O^+?1zbv+F;Dy`dW zAt@B+I-4q({o(ZX^c$b+lRs>L4vDM%K`IU9#>VRHy3P{VvkXH2zBi>0vNZ+S9VaI* zK#c>a-{pmc(Ebh83bpIKDPzD%0kmS6W?_O_>IxK~zzr6K^5$vgj{t2Lz@ot30NB}= zJX4Ulp`hx9^9g1zjXA`AqQ+nD#O)8Ve?FqKzm~yC`cxxh+ z+t~`PZz0d;CrhjA`T^I>$C~5in)MBGZC#X=%f+S2jI3wkiO>eTIr5;RQ;yQtUyF{) zh`&e-hnE)LFk&$*cxt)P%BOL9KXDeIy-?HqbFqqWf~U|Drt|)HeqZD;_7XTOKuK8s zYYFr$AAHtt4ANwh)6@S$T!EqEh2z~j{#ImJNqe?Z#cw}4^0h_Eb(foX;Xu0P&n<{QQU0U>uoX&8|r6JSZL;`k-bU!!bIL*WJ zada^bwV!k-B-d?CXy-x!(s0TQQ( z@P`iT5TJ|z<6r-&0~|9zQ>3cMOP^BFVI&b>C_#q!_H7!-Gg@l;dalnh|B8q@oS$4* zcV4bq;l29pt|?Pf&>k2#fQ2EF@A@^D1{h~ozc^sf zKw((SNzB61AVo`x6#)vV{Tcw|0=FQ*azU*{jk%lMdn&qb!qMZ&milcnPLatxWPDBs@V=W83Q9yC0RF=r+efC!ncmjYs9vwZQyxqghi~<4#1~lsM z5D@POa;?BF2dD(DPbg1*f~PMOwcP|vK$!{rNlGA$;XPU<7~v^(7R(9+Xh1UrcsK|n z7%Vn6$DqFbtksABsRrs`5CC7^&huOj9PR>Tn;@$Qya4DtG&VLnJ&hL8-@Ru(kvokL zaRLE%2Mjz>_&m+!r=8#a^#S&Z34h}6VGG5;9RT7Xj6fL&Oz;4t2E-~&rycd+loHe{ zh!lW>fd0sn&j-{YWRd`}O3bTzB`_8F)Kp)uL`9)$ZVNVZuvSz@pY$P>`(+k|zMTPM z1t@$#psjrNQ+wok_%NUpfgS1sv?8GQO5?E01~d+cEs&v5Uhq#ld8~OGK|l|#94yGH zC6$K2Y|0eTZ`80TL&#X_+e*Wy6b5tgNGmL@3hrQbe%|NyBnzO6z}bbPczBD7wMqz3 zG-)}xYVNo1h#LWa!B5|h8nB*E3=P3Y>^u;`4vf{IFPZo0N|0n40Ly{>w{O^hs{5Tl z2_6n8uSMg@7;RBsnGC0(&l0JXLHI@aUQrw%G=Oe!XP@Pc;Ct@#TM%kcuF`h^q+3~8 ztM9jUa{0mF-rL&)b2TV97&!a@OhOAVVJa$#v~M_w6+(UahTxouf&%4zJEXQ!kSq|1 z&E)U)fYfE)GkJ~={&Gbk7RWD144{6^6k7yy1Uy$@Uvv_b7>0I)6WCwN%dNl)Mx*{SKcuLvjJor>#Nn+X*ECGZh%E38n>8OH}9K-clPUs(Vp9>Av9z*@3ra5wVjCP_LS1hKX1 zkfBXI1|@whEh~Eq2`4h0^|Y<;zW)Wrz%_sf^i62P^Bo)=g@uGHnvIGH19DRDLBfp& zkRN38deGQYe8j>M6A@WhTm+FjUx!M59DHOroo0>wp$d}#< z7z+Pesz4?QVi0Dgj)HeF+SHfQAFB>%{4F0OLU8WvmDZ$X#-Da-yT6 z0-Oh}Bf+ym9hv07;2_w1JwY)JJc1x@SMy>gI`p@ZI^U#DNReH$Y?ttoqk8?`--FlTZP*iTM8g zYbTbfJI&N(h3QDL%jw$K$Oy>81NaWN{f3C4Au-jl$rG{Z84wL|ad81&Jp9J{lE$BC zPnVw8{c31p0z~STM{o4UNKJuh#s_-k{)+|@c5T2jgS_sdU;M@@Lh!>dmLOWBk@;G9 zQ_9{!N@^I8zZK>te4<`rk=W;;_yEiG>2HG4Nbr62;Yev<=CX|my;J-SlnY!TKY?SM z)8nR6h*`Y@3S*}YC`uSK>&ZTUj?()KKd_i0SR_SWp;pxf8Wk`ryM1(~He*M&;lV7@ zZEFOp+B&t@6TM{L*jN?#4FHNuN=x&AY07ae@{kMU3?Q0NKGdvevQo1Fd;)^b+8j@c zC+ZUNz={p*)mAgbW1s{|nqcDK=msDqm?;+J<#e`YWOVTQn_!ZIK}gH8DZb8CGBGnx&&<%0>wSPFMho*^ z&5KMPS5Z|BLB><73ar#e!yU1f#BA+h%05A&!S;am`wO&&YHDg<2u?xK5{?P}>R=>J z0yyJF@ekN&7Oigx4;po>v_Mtnz?lgYLEzw<>4gQ259+pHy8tAA1)wKQS2>uNnE3eC z#>cDM&hB#R9R^Xc0!|I-!}Gz&OQe=mS8wsZJtSP!1636u_YM03K{GzX(*mwK1BC2o zj{U+O{Pk-EoIL|r!P&xNnI3ugS!W?5B!sE9==#qahos~Q=*CE}f{99rj1U?xGv_Hh zZ&B^SmPdm~zyfDxWy!uvaw_~eEUKD#KSPwd-<$h@nq(xzx4_BzDR0&ozmxfc5W=_f z^DC;FJx}sWbfFr86Qoc~*PLuuS{&+SFAPy)#Wr)-gOm*8#M2v9I7Xtt{FOOg6j>-p z4XBU_-b^l!lqWeAe|lP5SV*=VS4T&W< zNE--=8f5s+0WYHn>fMwC2vAG{Eu)O@ONGN|zQz6%o?f=w%eZywHe6Rr_? 
z?Fjz9R+jvVX4Rk)0qbKqT7;^)I?#n^Tr+)Qq9G1A=u?LvAj05>*f83yN99ECeC@@S zPN>sU+OIl(uOC!8XX|d#KjduAkGXm>yK!`#P=MUTJw`p9QCs*dD!I9e{r6wqm^U2p zwKms@4zJ%&#h=(emkTn>dhNbDOZ@n5B{bYwG_vF7#upV9s>0H^R_D`+QuUc8kQo-@ zD{A@9Y+}NItv1%IXj45RUOcKIm?HC}=Yl*k~(Ng0%C*U;o>VW9RC zAt~d_Cnq&zDy29e5`vhE-{7^~4wPl#vDOD-_;%HGaR;}*?^s63w%^ZfkgLL4F4i3T za>7-b=9e6IKC1eL@MNE^i1iOA9mL}6>%{d}m-^GwFd208PsR`kCbF{eb2M)i-GKFv zBX|1EHPKy|j|F`F_&l}$1R$bzH*OBsSL3|mKODHABO!gZ1aKA991KTQ@={x+uHP$h z4}Sa`htblk+=)>HEI#On&PFT;_r3yaMn-I`LKW^181+_x@?rk&6m1X#LufKD3Air= ztBr+xtfgNu6X}u?QsZGD-#7#HWKn#D8iciqiy;R2C8VgKek4!=6sQdd5JY9Nw~*dC zYIW!Uy~ob@k;1RbnEJ)$CTcBmpVcI(SNuJlb;3En_i}P()5X%lFW)1rW3JT0XG;aQw z(bTlE$DpO*eDdi^*ZpKe42=~olt*!NYKOA)r|$my*dzI~klZwKZtkU#5ztrk#@r3k zY>bfxf*3KNIa>zjHLb36u4wl5H}-6C{0Z#oc*9lZ$l$~Z=B0$ zVfht3v9eYox2%E|9jnY@zYc1|goNl%ZKH&Uy#S3_*`Y;+;}(fto<#L9@m9~&jM}dK zJHN3xg?ZGochoWpWM#&=6E9Y1Mm++Cd^MrJQh%D&%{B@QVaC#7L1D*MSED?jBS;I< zgQl7v>{RLLOD{M0eERwl85JO5 z2b4N^s;a7`$==OKU^0m{$ey=E;=oCqf6Tp;*4!<)D7H`Icz8if^pd(uCVf+pJ`2Txc7Dd1`)UPI;+O%NpvC1NjB|^254;lWAgCUG2ut3IPZ^x%0RZ8sne>_)}{!;#k3(-+U~oBSJ6& z*|9x=Urf$;VEy^G+3wBn44+>S@FQLO|8hifhYP_fc4R42yUQ0vP1}9EUp>9%*SdXq ztz%|1P@t|6?jK|j!xwbT!EvQWt0GwH;VnC23WXAg#YmB${P9b;$lY6&-VB7KflGV;?*_s@81)*wBf$9_31D#McFZZ=qz zFjR}hwT+(IpMK2K)3>)m+q;|XU=3#1{XqM)h}~z@^VbfZTw;~AcZnBQJN4_>z8Zsy z8Lp#k??wc%1+9Ei%3R871;2lrC$TwH_mRVauS+0#UA$zeuy4UWX zjdTWO`S0@8+n3n1u5cFyiI+XpS1oPF#ze}mgNgkGhFe>tPVkOBsukCfv5-fv#=b^j zGu6kLZKsal@YBH~y;!luCQ4=0@c-~GCVSZy&RZ=CUNYeO9x#7M+&RljZ+&V$&&IbS zEGJgI;prB)Ct^``YsWF8KZ96Vw)e;Ta;`@6Q17)i+bqkBf*&kn*<0Ee{=75~ zfe%iTf*901O)lo)3WIK!Bv=E!v9MotdD;iN+GWEmD;;`Jv0&hR*bEgH8tsuGJ*i0y z5~u$r<;6BqqW7W?QKY~rl*vAESdl~;8cL;iZH}W0Zr*88Me}fIXbBib6%bzv^36F* zj>@D;yHk$TLPk{IQ2HCMI|oc?sT=mPT3F^Zqg!%3Vm9c%DJ{k?y@5p?DtQv}XHlln z@g>)$=>ZH4_Ydt1{|-rT10 zgG|S&=PuKFMAPTdc(ri{9?w}yo+Vhuh<>ZJVtl1^Q@UovS_A=!Y@C)?eHVWE!q2N9 zk;cJGb-YIIksl?zuvtk^@z3KBblfN7PcL(%*%5a5+8ZgFp5q_Oe5INHNx^c#eX+Oa z!cJgbEBWD+mf45>4$)yFVFrUkv5WZmwC{ZLt$(~R+3Q9Vtx=dvnN%W)rEMAAuf*w+ zn#)6)XliAyJnC_WfxW})H$nC~ZcM7>`3jWjK)7h7pW@7ToO3s`Jp zE`I*W*Rf@nyc)f`Fq16?_BSt+wz*WF?zbv6WoMXPN112;_)y2Mb}>F$vNEvz^XkG@ zCQ~)zwnN+eQC=Sslt=jZ3=7A zVbbc~94QGG`ACE&pPRv|dz5aDn5Bhfr;kHxP||D>rQL^(&ueX>LTHa1PWR@$@xf|Q zr?6xsfora-2aDB3oBS2bjzJJ#)bR5ayO!&Vzj8KqSqoupVZ#T)@Ot!B5Iy1y($;M6;sGz7)l z``T5B{kG5l-{%rSDXGZpg^}<+=IZaeXQ>}ADd^z~i^}p)AQ(BHm@WGU&&&x1*$syd zr@m^u;~7l9&wdtXDEvAyH7Vuucs`!dB8-oRvu6PXk=7O021l07k4DIKpKVoX_J*_s zG3R&?98+4~W1p-yblvE8z=R1prwRF#ik?g~w!jGPk(6aJP{;j96bP?(%sb4+|N7Ae zfQ`6`3AzR|KiCbFwAGOXDB^~T(8?S%mE<}TqMpoLI{jh>SY6GT!89Ccxh& zqZaxY*dCfW1G9iMO}z8I1(bv^@-_`YR&lFl6t@}k_czpod7=?{igX_<@_Ux0# z@I%rME!aHGzw{)aAk;}P5b8J$9u#PZ7rt6^Y-Y1{LTk@_Y=7&g)ZY^SZLp$&LB0j$ zO9e$sUHJ4Irz<=g^(W-Hx`j|J*q3f8(k4lo6X$dX1tv6*C~gu;z= z>cuOZKP^26E2+ol@`o+vf!9o9PKh*#giBpza3M4!>F@60$j9Hs47iQU+kaVVLb_^5 zO_Zp^pdhe9#1M%>B}lAPKN%dhab0SS+0mrb>C5wOZyppwfH415QjtVj=y;03n%r*Z ztyj(N8{5tTh;pF}2`Q8SOAbf2u=zTb|UZftI9Eqace0RXxr@n z^pjX-RA>EaJ^!RDZM&g;-DdMuLdokVdJ~(XRS(6VJw;vSS zijnEQ;u%1s(m$q<)mmx1fe0wwJ$PB|&P9=|1wn_9Zb(;Yvn!iB3i${8VN+QwXVbV4 zQm)wH{^Vw0;QDyOo`Z(|`EYJRDrSz=s(`}U6ITVrh+hzUb!h(^^1uYMs{`hcOw~M zktf%7fMi>1bR#F~bQs>ot3WAHq%)*q1B>x^V{cEqrtavdQ}xiaI_7II6vR+ds!u!{ zn-miGSJ|3QUWmq3TGJ{26(S^&os&vCB=LfQ^C2!X4yHf*On(KAIIxd_4W}h({JPjv zyR9Rh<}X8ez$8oyuL~icV0Zz-2RDLZV;uzlyA{;PZg&W%5(8B`52EYiH-$jrCj==r z?T(4@%*OAB>w$Pk$d>s>hiwdqZzLhsr_9VO`L7x{l7zy21=Nt(Z83`7qCv4=rf)3_ z;UNOaUp&7}yo|^}g#2T@=00n$gpE^@cbvuM*2a`Uh_-i&5lVl&H?-t}yJDMvt@F8) zWHeu?ZAht!IbF1Diok;>Lx1NV;IH(q5aJ6F?|Aq7Hv@?zqCmEm0s{8sMg4_{5d;+z 
zv*R5~r2JSRB$n*dMnR!ERz;;mH{~Bb$X&y}m_}aRSD^9{#%yH3}3h``I;%S`RiWD*PI7Lk+daUFo_J_|1ihY}F!XfuvH@fUDf`}AQXVrnx2|Tz5 z8sDAm7ayq3EUi~~0t4|rG9ZRUayMB`we@UFXDQ@=ADLRxUWq$awW+Z^4# z4eRSTBB%Y&BFXE6FCuVpw=E9lDTfR}iY8sLpzmoGhv0|UQ?{>pmzQr+LdKS0+uWI7 zFH26lGwo+$pW7+S5Pvp{Q0hN{TS=5ZreiY z3HK!x#n!z|4cYD`*CLMj?;pG49N)wE?!rei+UT^e&Ih7(^X_fl)8n`E7se#LWdFuj zy4XSG{_Y5lBxO-BW8D%P-&AbW_{o5p$;0fvf05*-T$A4G|3%5xY*p2%mzSM#cY}F8 z=~f#JE1^V&(2PP_r?Y` z)=9>fyXi`gUt>NI5Y(z7v``|1?^@G%Ki%-p6hi#fh_#+k!(5pFP~M@b#PH4;dv$dL30EjUj% zztBG_%HYg#ZCw&#&81He2pf!2EDQv}5CUn9ow0Y^yXW1-(mD?+EUq5UFK#P;PR?H8 zS5Hosf|_PkfPs8wLk_EDq+=`!X$#NHkJO4DqNnNT_L$xt;c#qqJ3b!J`{J|HsJ-yA z*eUW!7erksievlAh8Ds?h5-KLM?wxe7z@ss;VZ^;&S~E7TikS6m$XAM37FOk`@~mP zR*nKGXKRFo#?cOef#g~VV47ZMwYeGy8{L%p@kF;WS@-+K)s|BSk`M>^ZEduP&h_^XI*BO~qap!Y?EC?Tz!aWa}y^R1(qw z!%@itzJksTu+Ocw$U<=fvbyi6S|vzk|4c&OGHCZOPYpn~mKSrkGcrhvDytJE{$ZSi z^IaK5a)vRM#l(V0zy|3srD7il%~@8p!gJR~G1R=-3OGe=}AYz5yxoYp-iW@5#`w#YWJs<5$1Y1R3r zxiv+>P^iT#^v6nNQ{#2rywv}8FHDNX9t6TKe_7-I4MWhycZN==2WGC(IY#zB{k|Va z2k)kVC+>e*023wygo+2P`rKdtAW6rZ4?*u#scR-Wv~`=eACEKOa~c|;awp>Mhn>2I z-@3TS*}7R62t&ZpJ2UlBW3oxMx&s4B+W|$xPklRA*KQrPZylqn7VDeBf)q86sh^P< z8M_L>QvDz;`1=m6vgpP5(SS;cA`ihCMVk7X<)YqElAQh&@*}JEImtJS{v*o#kh;l#m*(y7Ny|14mUVl`UDAv7+*vMp-{_m|T!%P1G zI0lgOR52l_{)O^WX>g+YgHq8kQRvcJ;Xgz9@Qnje<={EJwp|+wK5Xe3sGv_fS?L3tS?`hD(E;^|Is}?n@$piK*HXhPuWu!D!D2_hGgZ( zcBON5Ce$LC6hIPFEEOfxl=rQQp|y1|vklXWJ5o7}1~943MuEc}z-bELK#k$wzyGtX z1Bqf%F?sk|=Qo4bu)~(VBCb49wn_s0HKb4XYj2omRvOm)!VFfv9hZ79(#Z0=WxH=| zW?3v?iV`f^U&*m(zrn{6_%{YW_iA!}?{&=6=?Tb6>UlLU>s1u%b57t^SG`V#H+)@d zCH#~mMGkw~>F{dfYQ!_rm*&XPc@cB4R)C(-&)sU~Eob9l^D_9=gI($PvGYg$bEUYz z&uU$yg?=4hs10ET9k)b*P?0Xa_iR=gM><^6>g!wf?8NT31FK_Wjb3lOu%HmC_H67Q zWMA$+cm=mukQ#+ywf3`*yDQ{ctxopsEwnAp^Z$0A8`o;L7%7mMEq-uoNI9LcS}gMG z8qTHXHF~Zg3nSg7M{?4d;^>)oT&8bn{(Plw%awU^Qm0X4{P*#ZGPAkS3H$YE#jKmm zGQ$ixpMU^|&6A=Yr{BvZBg)aG?c=pIktU}LvBrBr$q9}VuJxjrEZjb{IE(SbMPmw- z5Mj_2#QdEh~EX8O#kqP^5-Q@Is-3o2y-39)Q*YqYVgL zJgLtQwwoHF-vwv#c&H0&Ys<>6?)HuqF1WPlQ|#kubJ{IoCll3Kty-?c%2i~#IJ_0k zOn;MhJ8-6=vCsw}EV)@gTQCW^Um$7^?R^C6@9;eNQ?Hv6tRlc)3e zu57JgjuMV{R2s?kYFMM4@qt<&9?9`|G9C=gxkdm?Zr$-G6c$Xq%8KOjiSEINxIc&q`^ZwsQb_jI$GLw@mITotrc|Sv>{bjW>$15Q>Wq!rw)7Z`*inVHjhOLE*eMg6cZ0EGbZp-=P z^6~@ulQoHm+jYl(t6{n$@bWqH5Hxl=>{Dl($J&4NjRM8RRoym`&aq;IyU09}I3q*Q zygVKEV#U_`KD=}b~20SCTTANAmkfm$bu2?%=FCF6mNj={9llE-9vLr{74t4sAv zPk4dUXeR5yMM3WP=SLX+VvpSuc-&IQ$ECxVN4G zWZ8-@?9xphyU|?GVZ^-$t8;z#qapubD58#a%vbK*5zmaP+z<5P#ByP+aHzbJjEGF|8fgUJ7!hu4+9P-6op|eD(2FR+n1J zhJ?7@(~lPAB{m>;m;|Kn*+Q&Az6STC6u->41wQ?Xlvx8%e+wVkN$ zj`M*i@!~QO=Lm|AGM8%8UC>+1)6dFL8b;5o?boD?ZtQ2(DfD8m(YSK<23xAp@Tdul zKb96%6toh%SW>n22}?A2d2wubEi)93{GKwEGo@)^)$U7UQw;>H-9F#FR5U^rJH^-< ze-GWJ(WK+5U5|k#*dkq1BNdmiy#|Ld@Y=>>ZWrf4bu{Io*rm*f3XvMk$jh9*ds@(7 zcoUeg;C^;wZksVtK1bwp$c5=v58>{I9R<{{^HWTi$abIYtP*@${Im^or82 zWoX$L9EmyLaXaFnz`SQ#)UL zl9V(DqTWTo7ndUh(uk;xcz^ff_mk+@uMC0x{U(g+hP1RaLqkK1v5k!l#+afg`Zz^l zjH!%}o7UF-QZka}&Yx}HrPsp*A^+oZ+pgVjg>W&(6e&NkZqmg4Z_ce8({sc^j0^4C zblEz?amA%*jPc{;gF(ASlZLHciNr6@tr@=bI6vgVp$q56FZD=$w&iAU8pim>Ip^2! 
z(-sW1o3!QtUxvl}f|Zj84qJM}Tn5G%Y(4;c171(Dm8lMi=~(nZdy2{_uxrK4)2dn&U*jT;(;$^FN68Q)33wA?h(zQ4; zyGNH{YxiD#fN^~HRyGH3y_E@amn|5=(sRUEzG3Fj6{lZI^Abn)w>$5fcHjNf=)U$R zy+6^{&Kd5=74qi{?{(<<)76fxhfP{}@Wvw&Q@oVumURr;IgEZA{8OHZI@(nlEMuBLR81CIczqY$KySD@+2!OOY#1P;$1Mv zw2_s%hHfp>+PEOuX`#iK?yDB8TiedA^PL0pdK^4 z0k)WZJcwR5A_EnT{3wyr8il<^^6uGXTqPI79x8jkKX?L^mJ?Nqf4*31}UuElLT z*9#De^}T%fKJ|>$!)-cJ`y4Gjjmcbej#e?Ym&2oroieiVO?MC5MfeVl0id+Zh7@fLMeYar262 zVRV3rh4qGYt3vnK#0wg00;GhV;TpF)Hly>p4GTw18b7t0dGI^mWiHDX_i4`u0D#QQ zyc6?#zA&8o;lZg7DSVNh$-mq82n7ut)*iz1nnvCc9o;eP}}Lql(GZ-mg*t5?6$7co#2pG^Q@mVP7a z)*T5owS|)>zIfnSvq?`jfNK4LO(AmqzajGcc!69Z70KBIn_#nvV%Aerk*KOjSfr}S zVuzYxsKq~aT{R31_wO8dJE3NCV-^6g2o}q@2`k}J z3~_wGsoVZ)>gpn@j?<`y5AXYu68_{lE5o95rjKb305vSy_UO`q002y|2@b-l{ae@3 zR@Wq0Y*kgw77a9e3_oUKs;{ET_Pyh7P}8t^&wd}gPd04dH}~yRm3nQjY#KQGpAClk zI)DHWVzboM*_57viC(R?S2mA6an*Omh;~{k^_p3l*>`BKuLTqaj*5C6wYW1k-x+B% z=xS9*DiUm3IV&QuaK>N@002tbw1ZwTaf!J!(BJCXJ2X%V2>R(-PG+-Ndl z<3AH;9&y<Ao_8vuYX?3($}>-D?XdNxCA*VKk)%f|K8$z|12A=u2^{wq=K-a{3x zuVai6ora5|FhQVF4pDSl%DWs>7$Zb{>B03?(kzS+p=4kxBW+g2{*#!ZFd_)_jYd%z z6D1YEfCyA>!@uc}@!2l{fKsk~1(%f5gwOXX@rNyTF8h2w{Sjka?sKLniYRe=Q(XA@ zL+NIcJ2$QyJ!?aKb4>scKN!&MWXr(2+fiB8_)>Q)Pw zgplX`YkFplHB<=YYo!IrA>lFRwQ7}~MWi$@m#d|v0bg2@m2O9Q-ggt%vR32fKfLE5 zGHF>;6Hr;z`FsHeL>4^rdKMo1R$i;Qv09078I|3rNWX{vIXaEZv}mh!*js;7bKQeS z&v)w3PBD!!#t1}bPM>*r{{e^1zj5PE!wxNPoI28^Nu530_dkB}sHRQV%X_zbK7WzS zW4CNnM^QLJsDfuFNsW9K&9yW)EY*Y(i1+L{*r{`8gy8hS12rt`P@tl&!loZM{jm7C zDG!4ZGM>66>R8n`R9E=r^YRi?5)%!KOg}GD6lJ*wq&3jXe)S=veoYg_9Vv>Y=28qu zbhUNR*D21l#d%F}ChJv16BhwvL0E)Hj&9(y2}7 zt~0we>@aA|wj~qCO^J34jk zI;V&AtOav)wRKkSI^KKS)?0JC&ROo%%F1kV?~dIDPwLXCOP}H6q!^2YLVbWf&Yo%r z+BR<|7E8xDG_&eK8?Ccz)zF&YaycdH0Ro7WWLwu#_6mUsaQCvS1TK<#?NBn$9FU}tJg35&0WJfEjb)E#@O%7LI74fR=o)~66V_1t6^6A zT?RG9-lmB~v!JY`G0P9HY-~Ja?def#?!>wdntL!5V?4+JSoTg{yT=1t-`(dO#`yfH z69BMk-<9r7)N8hw7#sF--k4r>N9@KJV_ej^g@yfub+{n9VbeBt^N+;uvsbG>)>Kdb z)boUK6W8pY)V%-rg)bi5Ff%pXFn@Bp!P909@A)t!Td}54{9l~Un4cM=`yarq0N{1? z`XtBxiF`3ZAP%BFC4KtD6>_6I^TIsC!#?cVzi5QL^^wPMFYjIQ4~dD3jh*EFppGgK z1WTNgId0#T;8*uv2@MB9>gF5&)~?g??w*-9Zd@THykgTgf#iAl5`O;3g8`xs&w^k3 zck=w`xnbA^&*1!z58gk2l+XQWYn<6}jO*RAy#c`a+?E!NEZP}KhM)6_diE^f&D-ve z-~X?@>yB&c>fh(w3|0tx1R;dI1(b>t1*rqEShS+`)v9z+M;+C-RcoECdqG`b9Yt#$ zb>gauA}XTn00~*lERry?NpkP|2gs7z_VfPQxBc}#f8>wk-h0kH=iDdf`<(B6PVJMT z(x*prcCC@O2>D`Je(ocWk1ts^RBP1AAuBSiD~by1Q4$fEJ#*6jg$%Nz^Vo^Q*RKBb z%-t%qE~h}I1pt8rLPhTF#wML50xF(Y=nPa7W7>%?7c6}z_FtE*pt$7Vx9PbrY5)M3 zUOjGXB=bp8Vg1Q-X_0r;@#!mLWUQikK{a2l)T(7Jw6Tw_-Pk`beDH*MK@I>RnT>pV zft17AeySqqlg(xgH@9S5&N{l-U@&3;j7zz1W&S0(#C~jLBeP0 zZ57`i`f^eB9VShfqXhtn9y7V(e0I~-r9(KZI!@m02G}ugN?h)aIa`*`nUMI#%F|U9 zd6%v|C>2VYQ`2{lm^bdTWSTZ%d3klu)DXjfnH|6-^?xM(PTFK=Qqsp??#eww?q%v7})QI3f|IQud?_?cNlF5a6_j7Y^ zJ$WvT`#9={hvH~Y?ZNu7g+C8*bg%~inepBdcM|f^+7+n1HJ^EY;s+a|C)bR@)Hel$ z;v1)K=2d_WOFaNVuAc2f7cl?;0FYgV%}k_F$m0W{S=#4|VjGgjm;=wP%d;=>_Wp1| zBVu!9fjdIN+9c(BCHlC>)xROrMcl#DTlFeeLb#!(e#1(-}GwA;Q)I z0I+_;mhm4i_5$GCcZZAbKVG)?3gG7z@-n3^!X-3iz=>31sK1S>KJyzKMX?}t?NUDgt#;L^ z)2CF|CM_zjCXy|NMGphr&ZPYtzNgBd%cQtdpy`p1dtJ4EQT=P(?`Hbl!!L+~00M~p zdMdw8)lya|h#EGO0I}|Jx+NnF!}=HAt201%g}Yzx?9+YSJ=U3Q+jDi$z1o5UC#=7h z0|!o6e^4Cq^0U`F z`>#K+k556r6JGH&=!rY~nH2Q#KN!%yV1Nq_eEHtnclGsv=ymySXM^4cVF3K6`Btn>DIH0WNg37t0u2DH2BF;VXN zL{SVy(N2$w&Krdgq9}@?J?{fxz0T??x=Z8kY!L_m+FPgUyM<%tssqFZVoA_@!^f8m z(!bnS2%xh;x)(Eg8l=}LXs3AjvVmS!1M9~+5JD6+y;@u!2JfmKgb?jXBF4H(crCYe zb@MANg99h5-h%}qS5Br*Ty^TFwUg7nJiB@BcqccH<$JScc{3~s(9F_(_N+Jv;mAD& zIw7Y`V?Yoz-Wq`rdbkIKOk0?9=Y>L#=~XQ?Y_?q40U*@2+)J6YS}tm2b2w_9j?HE_ z2^)1Nl+_k}`q4@i1_Uf-gII>57-Hi2GG;*pY2Z}Vidu{v?bXamxyqnbtCV$zAWVV? 
literal 0
HcmV?d00001

diff --git a/images/Libgfapi-access.png b/images/Libgfapi-access.png
new file mode 100644
index 0000000000000000000000000000000000000000..0f07fbd7fa5cb380eed81222f05daeb42d250728
GIT binary patch
literal 99779
z5v;b}g`0tQLD%2!fc&5fNLj1MR-5I)#hh;5ZzxP2Kyz zUH?h?TmERTn!RV9Nfx1H4O3^9Hr8YSR36xQ?d>0l2zr8bY&sE|_ZS=Zb}?=I z+pSt~@aViQ`PsCzNe zfQGaoQ&M1Lq{NuTD7o;p7183p?cr8+yRjBOJwAb|XLog;ws!Y7$Ux%q4EcS3>1Es- z1L_~cB00f)2Mf4mS7PqA9zI^d^Z)!62*wl(0xhtu5w%eAk+M{>UPTDRjIL#BeZ&^H z9^woC;iWp(oQmW(n8zK0=(F|aR69a@MV}t8DAAG({@{6zQc_Y;v~fD~pM}diy<-pF z+j{2Sh0eEFb;du`3*Ng`I)60YZLmvZ0hFsyCyT;R_~IRGbo31r)e>oIX-Q5~1ax{< zaNm7$eh9p&0({XkFf*T!60=!?b@}#ki9Uolr$D#UoAJRhmMY2yep8RDHN777!pRh5 zN*H)`ala+!d>wr`7CfVO`F^EI`9>x|^I=AawX?2daN?x&Ju?DjWH@;#(@?kN+x0DRLNyD&3Yq9GP_Kw10D<}kMq!vAq#9xHmws64h<@21{z zG56TOb!R*wZ7A2^l=Gxm#AV~_O!;Ppkp)mlsIGG^K3*Z5E1oIys3R*=yD`qUiwn2* zI}gBbx~eqmj!GAGBUV3sI&*Nb?7S1JXJ-O<{}MNc^S_Jtb!pFw&N@s4F|rV>=m#{( z!?l?&w`Vl%)8XVI;o6FeE!dK5^vERgJV*%%V2OaPCwH{ikR=FM1}>({+U4-nnYmUH zS!$yP@P83O46k}mqs7j)Nc%BjVhs{8xL*p0sC+g2j(;ymnB4d~eO}}OJN2rN4h?A{ zO7tc7fc>jUoOGpf^%flC+BY8lRPy{{bb1)dl62GS>vWNMW5e-ke|ipwaNFeUysFCh zG8&%QaAV>pzKtXmVDmCjYP{YtIqmf={+p7iy0wBCeC*y zpi!2@Gd6@aDnrLuwF};!+-=VW{a9^OIm5c+k2`JrPcSbMty)#JCPL6HliUVP8e6YI z|B9YW#~!VGre)XH^<-uiGJH6DbKqBQ+)w*y>$OhWOsQF|<<216<^Jk*qt{r*dewCtY{%E# zR^QW$WaM%$BVr8=y@`=n7%%6dN|}1z@$)_7W!K%&XCE18*1qrW zY(BJ<*uT)yjd)$J01RkxD+K>D7lKRC})XsAtc{|-nx#I+wVHXwjh zgeJ=b=k_3>#@Ng5ZM{*t_w+Pv^}9b)P|RUEd-KwHzQE*RQQ+b0%MC2Q>a(Z`wR+d~ ziSr4uI+DQ^{Hc{j{%9EUoI5@A{c?ZThI@;J)wKh96FZZmW;+8qJT%3Im~ylB>KX8z zs=RkUcFWJ)7y#P}xb)b>{T{2CH9n4a@p<@(4^vjtYh6dV92yP=;k15zI}>{MCasB} zDv*(ozwDA5yfA|db!9&(HkK~psGIc~i;ss^oNl*GGmL0( zgLaG!kuWq>l|(d1n`hM01y774cS?=VVJo6lgEV1kTZy}WlzsZ0fDuQ+u%Klxr6G!& zi=niyA`Qcvn_NkWpD{6&6g5oS;}*Fy5R5mlCqpK0JD#++=rvcbQ`O+~^olJ~D{tO(SUpjL(D zJ_dijhx~?&On!1Q|eWFDd4MD;!9K1rgTKI%?1@x5@l*~LLZF6<7Ye&WcU;3l=T zl_^p_kb(Wg(H7Ri;WIF3R!%|r;rwK0e1G@NX6A!yeh?kWIoA%yt!|%rs1-vwelQ~IiFc5D~mv-A)4{#RF#7{Bzdaj+z?OgBbt!Bc< zne!Lk{l;;UVEn!wJw2Uk_aycN9h^PHC67zcPKUm!usJa+rtq!|hN;rj~07G;Ns>#GTY?W7AIkElO?VQERB?`=?jkgg+9$v`-& znuh(kIY1B0KTcHP(*1HZV4~pR$|Ok&qqo$*YV2)1PsHVJdq`}ZOc!h#@{CEL!ReBd zK~Yupy(N9y!S4dtTGta!poCEFF^rClok;}t%=1ZGHOcHgVdu{5XoS!` zhd-|VS(OX*-_xhuNYxz`%YITbkC*2mGq9$rs*sZb4VbQulYoEZL2z%;IL$n>OR@E@ zll)n1uVvO-&l8uGdgE!4*9Q)vKS|oX`Prg7!pIiywcyx=%zt-;m(k#bpw-ih@A;dp!ZwsmB~tRJgeB!)f3r1 z(Bm`KZFp})xAyK$Uf+61fr%D8@G=%+E}xK?=m~(#H0RcH-{MRdZ(R;w`M|~JT@!Pb z?HS?d5}!J<)c^znF-aWyE?D}ln85n054ZXQ0usl+Z-^%37Bu90UCTX8@8zb%y9&$} z%j2@Wc{ln8L7y-rt;FK|H^?<%)>7-QQss8pB(w8InkkB%NWk_);ZHc(7TEN)$WL$( zipNqFcn46iR^G<0_Xka6*#?jGOrwv%DnZabwiG1P9PxVZ4Y57Qe`W5+U?7lD{X=7x z{dKw9vS)_N7r>#9#Ve7`3K8`2JmDd?D88PPK$`3z29kA|4h~d6j@!QWT8`xBk5H z3z9Z=1d-u!GprMLOYwosPu4u<@0xZ$UV0+H`{AGeByg8uNf!DI0!+EB+e{~#2s@TD z;=X65&tX@5v(6FcZQiUJ4XgSW8g7>K7!j&oZgo9n> zdEZ|?{=bG8cjkLMab&SD=aS|6W5x$V$n^QpX4?dZ`x|G!wD^?if?IOrsTqpZTz5<- zbd(=gxvAC6L7ADKBP#67s;r4LbbX%u#HP91@eZstIx|Y0K*y}m9-{L_*kh?Q#b~Io z_dd0?(0hEXmr4mOWS2f)m>+8{>JO_kYf}zV95DYw3cuX77E+^{Z>& zbHUfC!qiUc030}iaL2K&{U5y|pJS(26Dcd*bWO3kHgq=`ub3>apJ-^2p#4N!giJA( zje1``5X$%}pHB9zBZ?B;&OqQ8-(sU@FC@XTF1!YsvZ8i>T2=v65=OKoQu}J+3ME4k zv8Iut#u6UdW{-e&G#7@qCw~`V347xhaA8O*Md0P(8Y*$;Dz>!T%o%8y-LHS1pqXTD zv4EY)2o9%xo|N<|49`%g3TZ;HINY;W#vEC`gOHGtph}-GWLu>6y`h?4$vvDUMVaLN z&%bMv1nF5w8DbVga+vMF=l7bPA(BXRh;;XsNWUZ!CNGgz@l3i(p^;&2xaN8Gp2+Pq z2q0cFhK{*Oso9B!5XO11pZKNo1Z2naP_NzT;5(v-#~zwsj*DcYr4u_Khx+n>I223C zNbDn(k^8=v6dD3PjDpt}B?H(~c{f}5+wnTT7#35->Oh8gHI9k|T`?+32PC(=QG{^x zu7JLU-F@gmw@^Yp&N;)QPxyHiX4HyA!(SwnaK4f*qxX?E3M1g%7`UwhsQih}H{#{T zJIAU^aQCyB+p~vYTkD?*huM?l#cDNh|HlUdNpaPP$P>pycZN)m{+%In08^(5FM3oW zsid{y{1W&vf<8wi2IlZ3;e!ZA*2N_*Pk;EKSYO?o&Uc;obM@WIAT3{$ILtkm! 
z6{DE-N0bjcwibDUamp%4O-(HDXLOVWiMjXhBka`!Or0CJ00NUfP`Or&gu6aE@rEZM z2T4au&~cala;G}?C+0C|^o47f`}^$&PE&2dY=7BP!oPmYP$-|~-!!=Ul2JN_`K>U8 zMed^srbq9FE^`8=PG4m!2n2bppI$0gkfo&JAUIh+r{9_rAVJ6XysO`&R@e#!FhcJL z&)c26+&tBdw1f@Gm$mK+KiuXkP6%o}vGB>c(uja&ea*zLMJq2uQ1M+5g3ulrmMEQS?N`~06mG8L^I-|+Gx2^Q%>MT?d?(o9cVdDh5#_$@uU(2OC3k*O>}jQ zjrUvzi${76qS__^F(~HmDLGO46r`X#g};o0;OrBTn)IsfT3_G!OT;L!02CAzZwnS( zFT;`UmTG7vw-Q8=F3c1SWZ_^ud?gTdvNO$Y^f9EHU6o>@z@&~i;LyG>jttJ7&zBEn zM5IHd!}CRW)oDR+wj)n z38_NKOSN83++L`cp($&L{hSX^@yF9buZe&slQoN`^XH80&AXu;2$56^A z-)nNiqT|S>>!mGy^RW;@)%#D9R2ObaJu5`h5~$-%YhTJ%{i+?UQe_EnS%)EgGU0Yu zEpvgHswf1stCR6s7nNEl;%1f1pEuWSx0tbUC!IZ?uF2B<*lU)WeR+`w3%G9rBffjf zQBVl#UQ0M(I1s^!X19p=<&1ENtz0eHPc!y(>v7w*iM`kJHm^!}Wg^cq#878b2K~_> zgE*1c&rnKLcolHn-(*q}(y?|x>QGM@KC@QmD5G$#!gNj?f`6(S6Y}%;29fFcd z(FDDkbr7&Uxr7aJ%sF^ZzrPwQ(ec-5J_${Zdv!p;QO!(0R6x&#Ui`&^S~0bQNoID` z^z~!RGN=yPW)e8!@KnvVqQ79=m|Y~1Ub1Vf6Gh{8FSY+;&S>JwAv~8R(C}n?>H4B+ z4$c&+q}WA%$Dz?|{k8Ct86`PG`ji=AgdN=ZgKjo${AF)LaDGk750U65M^R_1${49uoKd$IuXo0Ftau{(7G*4{2@j4M3GPhVv))yGE_*ZJ5l zE!C?uD_O0*Gp6Wk&Vx^tF;U4`U~_Y|@_l69#K&v@C3WWtke~K)%mv?#7990XQ2t1@ zh5@h9KL96na43VYVJR*)B)l_2lv0j0kz4jZJFs&P=`rZ?>99dY)M2a#& zc`Qny_Pg}r1}6M9?g+E#wZssGO%(+<+nkwQ4_|`0)~eBtca~<6bvPk4v*q$+=_k}oOIGpA*H5hwc@m#fT`nc{&5qVunkLMsp175j zPC{3cq_m<%oMJ3)+HD7(eqKFdTTASJO74t0c(Z`(XHes_8)fe;%`>6{kD1bR52WY~i_!W?Q?RP-|KdoFu*M#JT9y zw}cf*$BF$G_1nkCUt(L|eogyUsH$b5T|l$t_GU}Y>^^UZlKaXf3gW%2(yg1EV%Ors zOP%lHlHN2~m|}wa9I&6=B*u+S%ggggCmRQNsOly$Ab~n@K+ROQcNQY0(P?yAvby#&LO?4^iGtqiW>n}GY{PwH zzHVsskS4JCSE zNu7w3S21zcn#atiJ-B#OFJ+51c=R8L@OJHt&*(~n%Kv~c?w**P`J=O%zf`uwC&r7mIWoN!!;CqCc_Z^%14Qh z?lV4+8t^^W^!_nBqmK&l#Gta@%<+}iCZ79zzH>n04&C|U&Sj)Vo-3$Gj{=!DJ2*Yc zphTqWB9=9=a*^g%<*y2GIJXx5V2ATWuEKtU6%u@?O_jREYa{tRxN$K#5y5Ajqn8GK zv-JiTVx|Uj(J&3nO?!x{I*R5RD z{+HpfqTRH_N8tQrzH$A2)E>9@IB#l+d6;zL?<=C1WYnSJ;9_JAx4ZafOK=NAv@j%; z;+~?l3+WJ|z-m~7N3}aRz7-)etS*)AP<=ej`Fy!5f+h(<;*`YTtWAU{^ys>qN;ch7 zMn#rP5J9G4ls0Y^oEfZAAQNR&*!F4UKOYpo^@Om-^&(2v>EqV!e{w01SPMIv_o2z6 zdT4LRh38dYbl+sM!=)!F!f~TgfV+`3MrxFDNftO6{9Z zha2c!WBMHgKEI(}+PsLhb+8ytvDkca-60?nFDBAVbkfMLLLuoR3oZECH>pctYf*!@ z+z$F&ENxXa`})-&5$E~ztT2P-))v~2YgJ(p5*sGTk`v1GA>M2a4A}9eLV*Jh)DP^# zZf=pfUb13lW@Da@oIrc(v^2D_xD8@7i{W7=LgS$n4~s*}z(EV9^*mda346=XKRNJR zz-QDiv@EcV*hqTMCTyYgoKzJ!Iih#}I?v>~r-DEdog5d^79SX*7ig=3w%OSu~*i!-IX+zY0)5DQ^E zDGaS24&tF|L`Q?JCw;sju1~5)Dh#bKY9Qoh#+^Hq_AmXJA3qRS3!jG51hy!M z_}Imuw-Z`h{0#X~V}haGCO=@^qJ|>;XC=QAiG|7<1SuvL4zr)0mQ>-9e`TE}foI>Bua^0I` z{^kf4kv)2B(JRD^qN9k`kKD&Nj;rtey@e(J=RHGE>?8gWlHn!>RRU%Jlx&{kaGvd% zN%8aq6P9A57Qzivv+VGQb?qN;7;VQ>eMxE0r@uK^gAVndw zA>8 z0>ocr!hMS*qJp*V%WOEYnP;Oq=3K*`@)wY1dpou?DHxxzjn<~uKV^f#Ao|^rsTgO* zTqDC-z7wfot3NubO-kaZ&7LZ5ws5u7>(w=G`PjC)K685Y*?Qgp7K#TlSBg{Jd_vcC zi2!lI3rz?c3^;5#-k#tOSX(H(q@l_>>{K17wMCQ&xz7!G*%+S#zw{-O{Ng5ohumOR zPrU!cLK6f16D9(utYd?y|J9X1z;3jcU&6_cZ!|gQi)Y;x0XA(=Lqsbq)IfTL=z;tD zPpnEA8AmW(7Lm|9(IUG`Zs2;32tb+r20U|OjT!wA%KDC&I1(2bQr^mdcVUdu!50z| zw@#NFp)?~wJ*Q!~zuE}i!$xDC9!3&F49>mFrjIjOO-?&j$?_$EZ9mU-ig8J;Vpe5r zQz}B@+}l35lK3#7qM8|)8xrWckAHZjwdO;Cqux(y26g5{e%kCXuHAG^O(f;#B`Wfx z;pJw+US8e`Vg4c_g88w^fn3PW0L49OL}W?*Nof>LCMVFjPV3>3xWBK9O+Z+c1lmoz z_~|*WS%CNB%XX8NOL(-&GYDdG%p(wKHyJS+9A}F)B4E6JlTk-+uBMKXdHw?}Eb?j6 zYd-L@AU?*&MaaOV?x0p1IUy_Fv;r&zQSj$gu}b&@%qD?>WO+;`nC7%hCvEzC@iT}E zbC@5k&+!9|eQKefqeB%Ma$$>XUx8zgy!|sQD5R^w@mvk?@3O|qM>a@M^`6(rAQ(5o zv^AA2)h4Xu-2xToq03^rlp^{k^d+ynKl8_{DeaV=4+Z>moU~z%*3(Xn7UOVK)Q0`l zF#l_=c|C&Uh4>fLSJ3Q;}p9WG3|P=rKAMcGcAs~(=TBxgyQP^WZHRlkb*%8TXd zwi>f_Nq`WuaU;;?8)lHn;YnLD>{p2B++WBwd=AQA|EhheR9MY 
zr1=)AeVm&vxJo2B9aPKDWmu7Am3P$1RdLL+P{a9PS^~IC(rebQ>kkRxGGhzbKd+ck~a|(4KVK=MxsL|L`6x2 zkgtwubj-Z${v^0^nHJUYJ*;|M`}?TB2W7r_j17Y_3%PPqWu=NR51cGAug@_3?Qm-5 zK#{Oh3wAL6YIRULfE_hBAd4UELVDIHviA^F#Sdm9J-L|jzAKsBiWZ(JoOAwk+h3^8 znXB@!`LWlMR~X;sd9YcStpOZ8%8I}VOZfeSr}O#-jqnC+ck2v(yU@WxDbMr_v}IOB z0<$DRaLv~OtvG2dED}>`s#*G*PBRbxu$QS)igcm1bqnsyu+RhwogzR)GQUi2Y4zIQ zbam|g+9%L@HB;Z5$z3b~GJ3C2gmvLvS!XVfi(5X@_-&VKU0?dgY1>tibZIe)1{&SN zWXOPfKN{WKv9V`joV`B#*Vfn@i|rQs5EtY-YM0RB?oB`$lm?s@Y`fbTK@+$iTLeGP zxt!+3aH}T0f<9(!mrC?I>HJu79XDh3Wc&6fYGJ(p?Xp?C7!Y6bXzSa62zwid(BK5U zFYyTcHmvjnoiYxV{BCSD94<-)-sbf&f(d$^$>rs?2RQBe5u;L!;3GUf&ypO7#67dg z5xA!j0PK@f^@|rhL20T~ELZK^cwd~Iu2h*;A-m2=nfXTi7PcO^%P?@Z6^RlQy2Zn> zHhcd3=Z|MdHh!w5#NOsV7hf-&YH-17#@WWl^D{idL3&N zg@WI12GREI71-SR?zfyK*3TPA^|c?!-z+aQdw%bNIbNUW#EJSSI5lt5ds1!hz+?At zP5f^|PSG-dO4JrQNG71pI=}B=xZcp?jBYIQ?Nq&akBrR>8S9El z4V9m_OIwvSj>{s(J}8;!Ty3JxN6uRjjDxbFn|iCDrHjd;)5*14#y;`l@;%n)dvv^a z;~(FAHaj8i!#nUE?`;ia0KuHS+AmK&*Xzp*9HWu)*lqg4n0e2Tf~{Rm9@e~e%%1nI z?fNq#Lj$F|r>}}qXpD!;sq7;EOy`qj5d~0mv)bEcY;6NuB}fupM>JQ<&!yEc@i+e0 zC2c2H(c-t7(~*+P?pO^<9wBX&H6A-}sxiN@v&D#>kMbh$jO&At5cG>*=-Mf55hF|X z(s3OcnuJ=nIBcEvMee6)d6{Q0+9YM9hRHjS&^YpS=lB45OwFS=jNoJ$giySEKyxr- zC>Gs+WtLY%@SKaw-s$SIK}+mdMZl7PxW{1o&!)Y|wsfAB zU8{F+gIj*}!}me_6W5JslRzK8TZJr{x33A4`e%0C)w7Jh(#{rc5)CyFoj#eR6MWvn zfsx~WG}W+8qGHnDw6r^+>c5g=lAx~ydS0ewzC8K54-EyN>{5ckv>dF{M#Pq;=9LaE zDCu6+XfSSkMsQXPqw|Ygg?biImuRon>ND2Z`~7L)(wd+1(J|<3QMqq}5rf7P#}!{r zm*9TVX$0PXxLneRZ@ubUDio^=ufQo%GYr;5kZa$`_uW9JC!`DIR_VUkmThAQEeXe} zY3C{D!}=c12OUvvh{$0=WLwJ*8^HfnW=~%21L8p@x%~&KieSIMp%_yHt{CC$y@_MT z>nYTdmhQ>}e1mC!RQaU1h9Ni+M?_gc=iJ;)s(9hU&4MWj$GNZznYKNRoT<{ZvWFfG zn!$rb{Usx!=cUhfNCMA^=w$LGWZA$3Wwr2PEqBad(M8QcEy8!~xQ1nIx2;$j`&08T zsHn+osbmwi-%b-#3*m`-U>;N|CQUK|yIab6zhyBAlY1Uticx_>tK<;0Czw{AMHiaY z=CXB_qA`FG7SgH!2iI4QD{gyQv+JpgD2?LC=N`V_#Z#8ZoRrWO2rPjb)ps|tTw$&X7_ z*UhbhwJC1*4vHYwahnWgVSZ+wN(WOfbMe8eNmP;Oi7m?mb6&R(^Q|hxvn6SHEHu97 z{k_KXC(AUqQ{v2_e%mPvGtOzkDm;)z)DNw%kH@8&y(=DFj#t@+@h8oUr$3%asj%E0 z6eBa+)%mlZqx~a_UGHOT*;%zSjbJ89ZrZC8j;F%hEzsybF5WMfj_fU;E0ESpr9-ym z$SZel1hIlb-?}{$okc0k^V^9&pX_8bAYrf0a#caYk2R4&foVD^HV{Hw`tuPIXk)*= z{rJ^#q5p#V%AlVk|&vo?WoVbT* zGnMdC-@CQw>cTBMCQ;wJqU_J6^=LU(<6@WPmru3)6rDRdJ(c^grM8Z-D}-5J+X!hSgqX>lb~$4L(54 z9zF^SiK+U&pBsZNNoE$~LYI!0g;y@Y*W@`lD4#I1J{x9MIDGAN+bVB-e!f7ie5%7` z{6^%KEYL2c?RA9gBOa3qzm-95=h3xmxtr(E8h+6#8sgQm?n3bMTp8PsDQBoxQa?Qd z*Tru)u=AlX_S#^vAfAF8l9Gw&g9ubqW%rA@b3@y)4z(VRft0P&>a~XDPwO5p|1zXS zk%XbO(*E!H+9+MVj&GPU7i0HpaWb{3+}yN+8SORaCM15EomH@;8xpGziUQ zq6}5)Rc$IJ^%vX8Ck4{Rt1{)CO*UxKHxUJ;H=wbfsp2o5BmDjefCALfIgbSvM@Bjc~jZ`CF$(bi@=M7 z*oT99GujNrDHI4u9~$-D(gxi$8s_U$M|3IluAcqk0l~BG+oqY9K|2@&L-#<2UtVC3 z_INX80gWErM68@Bw{}m8;g@UzrL@F2#%mv*JkxZ)peV5orKAm2KoPhV-htEgS(!Rp z#Ni$Oc*nB;?5pt_SEC!GxYS{bGveCapL1)5YD{g%Qp=sl!UH9{YdB4|BBk7F&Q`g(c>;^VHn zUpSs$#FpeSp!SGFn0TFRySH|-a7-W3dvbGpTN0SIT&jSvE{+8wwByXME~$ zpWbUYy;Jhj%dqwAfVCfIi8E8J<}r^HS?5w8fI+5!tOZoN`tYQcP}#ii@N1_0KBem~ z7CoNxG3-itdiy!F$X}c#5}MkZSWKv>guX=fWc&PFgVi=1>8K2GDBl7M;72m?uGY=- ziY%SW_3()YBr2_@(x6X(@{7Ha{hGuV3amU#foOrR5`NQQ1Ezp_4Q#S61KRIm2o`(n z0)8B{UrWHeJTV<7(MPG**#tiNj#U~=})!gm0vG%}pS5*m|_>hR6&qm||e zQCU{}azG2~9FBj}0mW1bgOrn>c_^)C2+)++o#VTdc^uewyLZd$zdO5$8*sJkYSMO1 z16k)Pmi|_w9+@Nly>W0<=1XKAov@jHpvxU@v^>93lhd(8RK(S~pmr~NG#4vD_q~X_-MesoLK7EH>5vIP61te6PN-W3JK!$(S+kzGOsa<@ z9)GS_lQmvN$<{+|C<%%Z>z-)HIldD>5er|&woMP^nG|g3QwqSgoeb;;Ykf9#%2$B$ ziFt0MT>9xpUlU}Vn;E7_i?F38_t7K3`dYy(#L(S)O4Pz$1LtY60vLOO^Q^#yOI!av zEx@bs0lf>J>ms{A7x!O0+P> zWQzcD1A}JV>ekltqi*^I+ECInuWbMZUvEc?>Q`i5JaO4Nmb`iXOS%w8$bY}ceI-yA zP0}I59JxXqwP2?C4mePS4FZYk#++jk1OYC23s5<<<$z>xU)b#ku=Iv^j4& 
zBeI!84PU>pZuN3Lc}TbVi07kG_{) zVweokz&21P?6oIsJ57MFsRb*c$bp#dEE?k__lz~YJ;SLEZwvu;lh$#yd7Fyghy?)i zAl5i+Md0nX5);EL&T=C*G@Fyp8>+4K%TYZ?<&|N%AH0`-6yXDL$2ZffzC>x&vQp00 zo8g>nVA}&=ThVq@Sbek}PwBG-5WJhoxf1hKdbhQk5_8u|iv5mJUs=bL;G#nkv$8x^ zIELm-2LtMIZ~3@GAs5B6SqfuiWD+o>8nEH-bY;?HkNX;MBY1aSqj8Iw5Z2i<%+;|) zD4wT$tGWI=nC2r8&*rL9yp?MAfPD78O`2mNi<+u~CmZku_WE@eo}6#JIK8M=^;lO%QS?TlC6mB~RYa?Jyt6=`1DM2+|?io8Qxi zqMP41Ct*P0JW)^{wZ_bp^krVkCA=uBO^pwCuEvz5#B`d=Afnt-LeYB{Nu(-=A6h}=J<7j^R(0JO z6M-NJtPQ1)%{5`}=WhiXDv5`$`hxKw;OUQ>;9(RrP&exS6dapc_C!ZFTW{YPp9i_2 zQ`F_GXvUK=s(HienkRi(z0F~(ENrEM2!h5f9J#_6S8~-o;B4LLJ4@f{8kcG6wJN9K zXGPm;T#hJCeHvuR$`I5nHU!*k=*G2Dc}-bd@~9#Ri8jr0!$JMG2neS2#oPB+pN7wD z=D?!UHafz7+?WU#FMs1rIqP=XdA4m8230zTjU(G;#QW7-_|-{cFLkL$Sx|B}XLe2iG5H z<-cz~B{Olr&tZ;?Bq+NFtT(2Z$}MX3FdjKY3)+fo7u+XgcaYH&IoYQ9Q^Y>gScjb? zN+|(DBBa@8fGU9Asj}$mlVPH~;3K?WaV!vYsE@>`52tG?dfQ^kfncGJmRmTULBtlSQ#J&m!vc`hg@+x=)As4tw9uE zYij4Y6u#D9isgK-2RXFh5qm&m7|>QGJNC)D_P%^HtTO-wazd* zowUyMdQofcL@+@zGnkUa{pLB~)hjkDu`sQ=Yj_GhrL46lzD1BCBSHiQw>m{{d>{V- zuKv)w?WtO4qt!9dFb}p0tYoA7e{PVe?Q&P#?p^Vgb3)`}_V=j+-uu#egfLtcZtrhB zPhYCbApucz!c(+zi6v^(!1J=-_oQ{8Qidi@*Lue2p z=f|(iDtUadxeB%@Szx(MFH zSsO+HaAOEmuVKVCZ8{s4n$v7_I>e?>EFbz!@@^zs&Zc~iQ>slokk>ei}!b#7KpKqHTIpwEsKZ-;ImMHY*m?U@#yhxA#OYs6*W z9y0>Vug2vu+!!EBI#2GZLd7|bdroVH;1DU(^N42(Drj@FA5X|Lni$YrPTy<$k%r1s z3o3i-2^d{|8GefYZ%P(@1`^;U;$sD_w2qzI$Ys|9Rzwgwl3Zy;fC)s)8f%B3=s%uZ zvRq43#lA)R7)*OwV_k7b;_9C$zwQ1_E6y38p8vy&zc%ntB(bzZ&fOf9s0HZbLfq{-+E z_i?hHS?#b%Ol@4Q0=wF;YtS?)@>f6iBP*C^_enX^Wdhsf=7%jOk^`Nn5a4`h3oP{K z?#_ao67Cn?sUkWxuDhS#xxf_g23?z^g?@A9yYZSeD!%H5r*LvzP1>2Fe2rqh49e|OW<7qTXp0I=6zTtUZAHyc}nrR6?Z zx3Kazncf~5=J7JD_|JexXV}QGtm=i$11~1X=>gXoqJ+!Wkqxk#RpD5CZfb54EuCLM zD!Eewvc@&q4fnVa^mO=Bw*75WTqt*f{#gy8x^dEpB-f=T%o?Iw6Uy7ROJ}xWgorUJ z%Yu|H6Y58NeU)f?iq+&0^dfjTl?9d=m6y)L2M51fy$u^kfk31+8dMHF z*yMi&)_1d&&}2}v9>%^HrI5PCY&ED+p$A$UI*-=+I$!*w53&4}7mQ9UYm#u`4!q%s zAGMYfi|1F`oJoD z6pG!+z!I10oufFnb=9rtC;sqNqPG<4W@0PW19>A3F=c`WO!`RS@_Y38Pz+6~0oHpC zpK+|N-}5=TW|_kbUy`xr95hM&d+xyiYPww^5_Q5i4vSnYUcLb0ofqG^>I$tLqt;hQ zY#7cD5Bq_iYc2y{_XOVVVxZALvCq!4WL?=|JXm|HwhoW33?mNzr3;%B{{6{Ixo_;g zbA=yN8)6-itaD8%S+(5PD?3YI1ixvUr0z9k{o|knZhlttqwKYztW^|s$(*Gem^7fM zD^7Hj{NBFWYt;ku-5V*giZ?XF9};@*H#8b7GgcT`-#69g(&knn z_)9}T(8uf;qSRx207IeyHIKg`|&(ZLIo9;UA3rtd=Yfrm?yT-_!t$MOBh$fc0ak6j>Nl zDOOGHjpf~TuH#oZOdSx}{ih`PmfoOu?ar>Ym4@&I)E5xt{TRz4!ErWBgCXSwDr^3UL8vxjz!bCQ@rbBA8idj1C;k^yw^ zdGZuUB!P1+;CwdrUgk`#lKcz(*4#-ZjPo$G)qh+rUHd zf#HmgIdI}XAIsvOZWqKVHsb;7in3l!^l*v{*s>_J6{^*KW?u2|%QQn911gpxEtsE0 z>OS$`?dozWd`emsMC&{0>;$Oc#O}s^BJ<^;++mZaa!G=Z)$?cW!ln_cclgwshcCSV zBYRfvppWVu*#H`X)W9|{y9(8QPi$SY@>3N{`dDkDp??>U(cW6bE){JSpud|kUUWwQ z682?@`O(bIAosEHSA(*n{}QnB6$Jch`$n%Xz^&?$@mfh;%!>M~)aRu{U&qyY6hPRA zTOctki&YuQ3@(gJl5#*BN;#uL?(Mt9$#?$fGybdmMlOxRc-W9x%b+|6E*7cBzMa~x z8$PKd9C69BDgVBCGoWlZp%~(C#sc3a;?mIeDc21qzBjNVcHsZ7AoIQ$l6k9bu1tDpIkF~>DF ztHK-rPR`UyLo8#}8kch55Pil6ehRh5CZ%pT-OdFzb!>1tcrR)QTM+-UVn0d{ocHWE z@%-hN-C_UEQ{KSDIFq-rSKWgwgDEJvv2iQ!)7vJqb6Y^!u_kYyXL{*Js(u?~EPD_? 
zt5p}yldpjPAdAx z*j5DS+asH#E@!~ltm}1t-!)Uy@>`UU##hm zn9LG{78w-%Yk}McWaQQ&pS>I7$p2dI>m%EK+(`IQVt3h=af=>G$|-`vMlld1$fSdH zTf`_y-ogX2*M^C94Mkkv8-fVB4h^Nf3tRrPwi8hAN{*!0>dS9X+#`|-<%aa4w~}Oe zGr(FQbnG2=z`uR>CY_#~FkGHK1TnI5%TdkH7Mm7E_^mKpOBhvk8J7EjBPQ97O-B(7 zL^yoV!FMi(A`kvq-ZqIV68G7~egY;eb9AH4vCr!(bt3Abf0caYgJ{#k76gb~WjV9< zGcQJnB$>t2apxrOvnyG4DG--3h?j)%lQBu#%+Q1d*x zp|vmX_&5KjS>-AKCovbU4q&SQgE~U?8FF z5Q-V8<^P>zz&;9t-^@`?NZ4flEye&7#H9rGlWdot_QBs?fQ}Lbz#CCwl4bsxojupDD)-N$(-#6W2A6o} ze?|AU3F@Ul2dA6eQ}gF@13&sc$E#ql*1r z{A$b1fMUQ4AiIw4tdLl@NB?j;ps{r1|Dy7XRKXzAAgJ@r)t666%(yP@^<7}&rbUW@ zG7@%Dmf!i~FAX)4`-IEzo{o%9o`Va2me-IX$Z^=DH0neG6;AXYp4CrbE zC&hJ@5yaOdutC+C8Zj&kn?LRLeBVGAUYq=7(F4@6LDqZxRr2gFgZ?ivCOlF3lMbtH z09E#XssGt_iiiKI|B-%tTC91CRTvh85Kbf8a*2jSs|HsK+ZD!oR>v;zmdd#Pe`-`; zHY~kOp_l86+|BY@^4b96SGiv2Q6P0w{W?!L(!-@zsVrL7RcP}fX*^PXTKC}`;lO*z z;WrHKS&T>+E3Ren$)Ue2@VI1i`cG$~Jz)CZocFgBXZf3>{s8jTJvdo~r1o8o`ln=f zUmoyEVXe<|sidV1<8=2rXZ5I;bTQb6^8gKjWw{D3p;N%jTJ7Scm#_cF#IAxE3r`t2 zun2x;C4FG4V9!PH`kEl0?|=GN$}9~7+Xc02K*CUM;#n z8fW|Ea;D@zrF=>wPWqSObiQHu*I43w%m|+CGa=*vOHVRqyAuO4^SWOEFrxU84s2NL zQ@qUUzhGG)BhanEKYwtGP~kEenWkhZg2CB%6wyHyfP|$R(*O50M1WeTS6Pmu@@?fpNK`fCvB$QEfx-ckmjE!B zhtJ=g-+5ZoWGo=UG+Lsd``*WdKtqKKJx4X=hwNXHL_)i||(C?l*#G)zU{&c6~_VT2_pua$HjNXW#V{MUmV z$05-%o+XfYH|%5G=>?vBmO#xg`C>k0eF|zapojC?#XMM`tAaDU{}fSyth+Ho_vKPz zpH*=NKxYtxpe}n?9~9GieE;E#1&3AtF6k|czhQ%Q*?4YrKQS`T0@#TEc9R0;YQ8cQ z&O80%CQZTA0#mWd1cYGgPQ)VS1!`rI}gS z4;+aBI``j++0c)^EX`$Q!ft1ZZiXfj5Df7h46aB4dyl82L(Z-ycMU($rvwVdc}Ms6 z;v4|{{-_0D>oukP6$zmZ<3|>+eI`z{x39W&w^^fV5XbHkxwEe&q1dCoBn1IUj+*~O zUGg3lOKuu>)}=_lQES7!b-=nb!e_A|{H**)XLJDkk7~QSMv}1TUs(|Q$bWjos6gxH zxo}2-O;%6huZzG_hV4TuzlGvK#41bEM zXW;~V6~`gw&M&fWy-vKdi%xUC!^e~dtCyCY3nk?$Rlf0k1zx&}bZ75m;1w4Jy|s(V z5&j>@Fq=MYfaV%-?oCUnYI_&pFt$gsei<2;CFEBeCo#SS9 zXTI6At*EWaif_-HNjvaw6&zax{;QP=P(ngD_76-7Xnt6eQcoFR_L{Kg<2`eOlf9}) zE3vi`N6zuyfouUH%LYN$B!8}UX#=6!kz){Z+b(m3kpY$xd@S48y5srV)|g7U=CHL- zB)J_6JXZ}^ueg1)aK0V5Ubuf#I@P$+bGx{I-T7kW+XI@NkB@%wbqt_`6~LLd)rJB> z6e=y7p$Y#ZCGrY91at?V`shDiQ++xb&GkTCy$ut7sGOxkp=or+NP$;b|M2(?d{BuqsMwu_Jafh?i}v=*?Zepvcdr``436BfN!Bk zDfpk_hV|x`x265Z+j8W^uljtS$LqWLs(0#CrZ3@;Db+D=eC=8aJZ19Q8VPuBAzvE- zVStpNYwLK4OdTvf;<6s)QT|3qvz!q9xp(=y=5K((es_Na=APh^Z&WleqOmb$*?vR1X02}^j1;sfTP(UM@o&{a2gKx#A)IL?M04otoM-+*K% z14-eal9fM)SlkLhxlF~O1JK@ z0Bd>e{e-)As!p~y?Eh~(yg>s^6uB>CYR#p*uUd3apqawZ3e&c_8KFCbWX1t^IAyG7 zNXO8ota@nvb6NQy&DWJF=QP#E>miWP6OTAFOx zbMZd07eI@D`ZR;lIqP@Q1+*A%GQR(#)2M)e$)@lubN~4;(RG0KZ9VIugCoOQ{$2og z&z9oYnrt@qf@51w*(on`k!is^srdXU=A8+QVXog+BYjpH?e3I)Ie)S>-X-u^x%y=IJkBW-Lu7O#8a{l255c-&y>fxtebKA<#GZGYxUmZoGi&t~ro^fR!_xc+ute6UjL zPM2%dtjju>>KDv4f)>zNjE{kt{L{3Q;#J5cDc!sqkz64Rg7GuQ0lgV*=Qx1f!fKaD za(vA`qG&Lg`$5ieQ&ZQr>`2z8+HM1Hs`Ozs(D43g-z(4V5R^^k*V9YA8~FVIG=C{? 
z#|6U@6J#Nh$k$}<9&i&A?g7H``M$I8!Mp({>v-uJV7lu3RKz51er|v`XidwUyJtDam_(F&kGm=71Bf-E(yu($E zc=6+(D<+p0X>Dh)wBo~$0Unb4dGzyV^SI2+%!OiE8BKKxXx;~~^>|Ug4ach?NAasZ z>7l5``L)2S{acH`yxSp@+Y6nx3!wF@?TX~~;%EOi*9l^ii;km(n=+H@i;2K@(}#H} zqvB2%hhu@4EDJ>e&uaH0bCmsVH$-Z)O)kE(oB01fG<|m^-vMB0D6T ztgLIs?OHcGE8`+XT!d?67A}dK!nOCf%D9T`nRT=G9`|>8e?Px}xsH2Y=Xsv5=j$<^ z&n1H03C<$RzjgbMzFi+4f3aRHAaM7t^B25n16$bN)}aBVi^1Lj#M)#_W54>?;S8~5 zt5E?A4q6xJ&I-ObwLjxG^4eo<$$`EOPzT;N>r{QSx=9hj@MT?n$qOJs`6XnMC-ZqM zipQoEazAIBg)q&;U{p?uDAAv~{=;8AA42G1c8g)Y?Ty%xJFkx>6b3LD&!IN5Eq7y- z#;?HTKhOVQC}2%HT1tmwzb;ya4;N+1h|8gMb&Pcz0;HT9->2Nl6T|3@HKy`G^L9-Pe9EKmKMHK_niOEH6zVQr>Nl`v%+UB2)|_I zT{Y@tS~YUCyZL{hY(p^}gNF~JHdj=w^(^BfulUEkZp22f8&OA>_QYR#mt6sN?jDFg z=V(fTc<5Gx7V(eD>Deq`og0>I{~QI31ZpRqAkKHX_R_N&vUBBvU(T-|H3#n>T^??o zlOfhTM_%oGww)pVDB%ntYDK;4<^VkEggjy~ahe$72 z3b7!Io~PhuoC%nQx$(plLp8)$9Xv9jxU|54=|a0{1eBH#(r|((|Bip({O? z*x&MP>3w&CZu7RUxD0`DYFR3=9C5)|eZb@PK+2+g{oCci3jJIrbJw4^ zE_5S!-np28;ftYt?#aYxk5k@4-+L-zI^wX!!t7|lQ0a7DZ@}WQMR{MYobp5RlOdH8 zDZ}%v3(RbZXIfAaY`SZ)JiclOdS#aa2e@Qc^h_Or)3iYgZ_{Pw36Pq=p5x#=5A)9~*>elgl5bED+BT7X>QEWWhge zEn84WxzuMW@xeN)#qurZKf1e@b-p~*#knYk1S`_cThk<@c1|3a$s!x~o(jlqXCf7SQ?ja88a+*TG>lOe>0&>s&HrEnMzo$I14dcF;L2~{2B|g6V5d-IcXuEz ztt=}|UAK|Ss8BnJKBkmJ8Z_@V ze{uo4tiWpgX_Y5T)j4vED8Mnk{fXJ$Aagk345yjpdD%e~wtGB;(YnubtA!|zIIEsC zuQ@z~3DtPc9vmU|;%x~#J1R$Lt>*16E$dg46)4t`gB7WprD60+-$ozWiiP_AqVtV08Ap*i*?L zpV2Kf(0H5$jD$QtkzySUOM;oL8G+d}lBoHi`*Y^f9?2bSKs))g1bJZ?o7RYI^n~-9 z*WzsV$QgeZrznltCuGe_JOrs`pl!8a|p1 z7!Qa#io75I=s&tve(5u@{{t<`DxbHTe+*0}_rA3Du=bk`4UoR;QD#&3?67eSv7N(7 z(n{65cl>!+q{txWCI~HBI?PipkQ%sfP;;Fuk)-uiUf9#6*shHHg>J(_W1rQ|6z2|a9qGB0#h1N3)=x#n~^H7;H3g@NLR1T-;}pfy>ubtODc>2iG|U|Pw2 zg5F+lN#i5>didm~CHDLcKV*dDAc?CtP2BDbH#wkOc^zaIc(`OwoM4<6Jd6uYHCZzW z+sX?Ys~sa#_Oo9Q#BE!9ehFTE%c%KT!;T*111iw--GSW?J^ekWNO~orb>(7mf?RgBZ@gcR!lxCbk7|1mGW>isbo^oOmape}7hPd6Xly{PEyN%8B@_ z)nrUiwjo8qO%TdlU-H8L%2)?k;)o;1Kyx265o=EjS>so_Tqw>0Ctj;` z?#tZNgAo!UHn17pZ(2@DJwrBXns0(;FfPc7^GcNyIO6GpNw9tmvBM?YGcS6yBDnoLB1d0Uv-Ue;f2K&+To7d68K|igU zkIze$)g^-QebyfK53&i(LDxY1xFKhKNa)%v&-3$i(DuAJ$_+xYPhR}&I6|_^R(~>&_r;T- zc2n}25q~*bRHOshfJnI6?JpSe=aKEMHN23Or7qX>*k5e)Ztj*R# zpU#0mW%f0$mhSjtq2jL2}DDjlSx zeE^|C=HBi>T1I!pQXIKjcS4@_kYM?3Stv*1l#1)1Ix!$FnL^`B*>=tF_ftmYqdr$K z73ixjNvn#=;Sl>|07>f;mC(%*MUQNm(Ac?l`^XXHW$(QnGd;yheaKn?qh7k0&6(4d z??4$zIAb2-B+fpJ{=<6~CGp*@khQ#vSrJd0-PTt}C&xX5FiIgO&t~l9xPbll(qT93 z*Ad6QGBZYd^OWL!$8|Z+yD+7C-@v|GSObQ*SmEF;Hywc>m-Mn1ql@1xvBkTs!`gu_ zu0&8dD*dex>~({9;+A?W^X{}8zI}vUDF(c&m_e)CRE(^n2PYQ}=Cm%HNtQ1?S1=RYm_bqv}U-2$*w6t@`AKRlT ztCdfJP^=$5Ldps*hR%G#DD~A@RTr%XO-&hODpgRxW#FRdktD71U#%ET&=7M)x1+ZF zcR|K~O>$XEJSI;QW|a|_HbP6xLlC6zLib^xtwgD)>BkKxLb z;oj84nQ=D5FR5$6Itq{SGn^rreon6Z2FahMleI=VQ(iUt6bdzyiYpr=OS^0BaK7xg z>gG#@!3NnKXLSEAX@Yk9Ta*r0hyt;!jsD$VGc5t1Fm**?Kcf`HcNueoyUvb%m-`wI z4u^R)jAg#>xegV7sSiS{^!AEhG=4V|(!(ry*$JBUx-PLt8-|`njQcv#15ZZ~hx{=p zC@;1Qu5RetlS1j|$vk3(rB?RbL7Qq+p8SDw_%7o`KMyA*CAkTeqR$sQjuP1?ZRz&r z#Xk=NMirB^dg+>}mqWzE7UE13)l?Fn4)(5HT3wlW?@8RULGHv_;zZTAa^MSK0Z;!f zr}AN80CkbUM$=yIq?V@U=V8h8iar=t(7ep%Sh3}3R^0Ymp(sl9aT23=^Y&_2dg?ti zc0|6Lj+?fPiG51rw|3LpM?z~X!lsa(-SsFGr=oIB;rINa-TW_XAN|ca$cr9vNt}Xj zXZxG8V4mMYmoL6~lhAV5!wR5OZ?)BV)cfSyZaV0Kt4DN7o*8?53Mh28QDFy3pp_hz z9HYRoU=Uww?CeKGZ-P&)ASDc)Z1OT-w({A|6@;h*60Avm1%#Pi6cW>wP`0Mf?40E9 z5q8hCAA9hH62#P&EyXQI$OeXu(Fqv1#QZ3&lm5I6dy31Lg%ld|MNxyDkvwYFgd+2} zMp0cMvyVObpH5#;fg~IT1Jwn2<(8FnX=%D?2-#A75}LoQY#Ct2^y#MNL3XH$7bC9; zQq<(6ZlzN7Ps*HCKDDcrcghc@IFX@DmZ6N)1g;jtdMVgvE=&EXlG7Z2q905}M{(GIG7%O_C&S68dg_wQK z*H;KLGSf{4&+*k=8NbDeIBHfZXLyY=d7m#Dw|t&pCR?5@3ClOJIxEbW5Q3x24z%}@ 
zY#3fkIt!NsX^!X?=o_bfW@fG3+ssPQ)nJ|^hyH7SH#$)IcDy0L6N0Lcwqgasw!Q$4 z^u@_P23-Cz)F7|hs6j~Dalzr1Vlv_NJ(wN62KQOb^P@>Zf1XHOyW2)Kr_0;M#vN#b z^T2x;T38o?^0}CJ3?voaAO{Q-N!!Cjr$*j0_Vds3ea&%g2+9f=dYvzUBV|NMv@K-r zUKQ(6jIB1~sI4a-61fu5tEK3Ss?f#QH8~PnNkEooBc~67uba<&eYP{GF>0Icji*>j zno|YcM7>_!njMh~S*U*;MXg5O4vLY3mU&Mw!>x8XAYJ?}gsMusG(1$Dnjrf42{$Qp zOkMzOdgI@Ph|RttG)W(#jNgPZ=ohnBWyf%}%QJe5Mu9-3rdZLOU*6d=hvNl@I>8+`1*pqrcAA(qJrIb0w)_`DWXk&G6-L920v zw_E;>ntC?-j9o&WiJrh)`DG|oZ)5+Ej!${Wtdo4$OR*fjqJ+c;gKz#+_%=Ne znBHs60j6t8f^Od8Kpd9bA5g|F;~P97-V4dsC>f02fHhwORm<@FX3O~?eCuqsVqn@ehjiQZ3`jc}DE&XPGvQ@Wr2yCNE*}An{Oi*|Ig5kkqH3n~*Zb+IY z4-aTiSX!+N%Wm7Ba@)d}|5iM+z)c=iCeO85tn0;ySXRwhwxRO^V7wEGLYjRpyH)l# zM=1%Mrf$sNtIk8dgvuW&KuO~m;xEK2%8+J*f}exd6bKQjTwWOY+eW=DmSf&o6!6P; zxpIH~%SXLMlchhK3PVshZ`xvG>${1f>ltCV$EC*sG7}kle6)5U{Qc)XcSg;%zMs5_ zzD{PoVpdbO&y1Q0gI~xMRkBnHV*qD5UJ zNItY6f)`3FbYq1c)6!@NL4UK}nuBtXI~bH<6=%l1U19vb_vE2~_>3osPmH`)R+GTif0$gY%*ACyi=Vd9P4Yj+huyY(%Sf=X?9W5Q`pRuU1r`zhj$hHKyD zV0S9CbzN7Rbx5nV|7Kp)iI zL-h_d7%qyH1h!EVH~4)u8eEJo4ovs4oe)ZDFVgbZ}U< z+@|71+n-lj)%yoPJEORPmOlB5bpq!-N;C>AHgfD9Fx@Gac+DBb6I5C$F{!89#B%r72G-??_ zy1ybd_U>kL>;*2NN9}@Eu)&)RKwp_l4N3#+wX$llQXl+#SypH(6SOtVCl-r`z}u`S znLacW_qq|PeAoNlpGQj3bi1;2>o8DwEaq{V+xUXS5=e5D5 zJb94}H0t%TyUDH!8-81Oc$GGub+G$?VWDchuqVW$u5mHCx410FwLO2IP+lnnTj6S!yd&K?%D4ZNvk%}Z~-=ti3k6aSuc407#V>J3 z*##rr8BiQWM+P|dzElR?oAHkw{Y>o{U_gBa5dP5 z$NLjNWBTk$JU8KHt@-G6nm##)c+#gFke-(Xopx3bZ#)ylW-u3ZZQ9NWqn@m`VxnqO zkHmz#C2ucVw~jTzet+IK0A-c{DmfS`^9hCPzvmTiPKJ3WYtV2${=u8bp)Bcn`8;oL zhmCVhs^LO8nCD{*1Sg;Vdi8IQ`E->d0E=i*aZCfVT^-yoc~oV7cbN$F;eM5NVB z`9nLeC58^VET%jZMw@YChv}glT7)NSQv5#nO1b<<>r#+vniU_qqPl<*U)(895|$v& zXX(YEVN6e<`xe`2#j5wdW@@~QR(_+(pSH9(>@-Abd1t#x%E@4BWmC9UGTmf(w~Nt! zW~PM_#1`&{z1FPXbn~ocJ@D@kl?1N#1CU^KLJ*VD4|Geg6?4wzHYSka4mG4CNbrVO zU|)~{GpmBpKK@J1l!B(grbxbam9v75bB@zpS{>0?VQBYlR8u9K5hv31BSHqKHr-y@ zeI&m!I`5kZ2Hp~qr{_^tMfm~MTRsyc;e*xRB;OH(4k(}8UYDPf#b@82c7`rzt4C6* znBMOlH0_LhqMP%}q{pn(GvmJV8vcufE>DgQ$KS|TG4-F zD)dkAV)okK_1fc#SlTvuLX@qd9+vD%gzBr2^Ej9Fd%u$r-bb3DtAYvhf{#;xpKT1e zs@R?zbY{*Uc}Q+?XxN3IP2N^k(;2vw+|3>@kph9jFj@nPKj`3B z#e@>`wPS$31(seoOLHCPfvlas1Wu2# z4paDOB6-!~uVU^OVB6F+;b>&QRPp1ydD+d`fN(I2F9#(zZBmg=rY1xy)31=7ogI>P z7lOMcfT~IG{xq6-3l0i_3Dw5%>@@4528#sa%{KQNjZ`jvRbE#WDXqd#NmgBdI*?1! z($P7yGa`>hrd5C+NG3Rr7CrigW;^m#9@}(3g>A7Qp7byI=m&WFqH@)-D>r^(S zz$#Hxa*u03?lLn8Nb?(KT)pR(>cZ#HRXApS1T%?`3DQ1MUdg`My7*F$3Us&$jHAkH zW^s6dzdN4+hTdx~1F8V^80|pD4#t?64ErxrB=SLxK~2WpB^839ij}uG4KyL8yGIE! zDtDlo6XyRHBCLQUvg}5?MSzSFd)a^2x6&C&i)Lp+%*@7p@Lt2GN>H3> zi}w&ttRdi5jC!Z?DauSi7+N6;bh$2hHl8~jT$#X^z>BB&NX~te;Q91q?6SUQ)*k2t zIVik+mD)=3{ukSZ9Zup}4c6F74>4Tv8)&O!11;3|0jkT3 zf4%*LqyD{EW-o?pH&aEO-SQC&xJ2qP5x4{a>8aID}3BiPb$)x*Qh+~8@@x1p- zb+c@yyzhiYn%PuR)$Q6_|BBci@9h&tKQw1t{?4xwI;XW#;JmsZ*gLPAhISqK-ZG|~ z@aW$$H&4Ca^?WPGD-H&}g`G-}MiM4aW zE%wl}jrg#EYkOa`=#8V81yBz7_ebm%_`WR3Y+v0H?^bp9PJ2j(Nt!tXlp#|&az3Ww z^-gkm^oK!+@@wfI6>@9ZB00@={qv#>kSH*kyylRRT0{9=tDwi2py{@p!M!qdoszLzFrV8-;_>75K|@v`r6=u)zA3(#9IAPiKc=+=bjf1Xkvkj0^`qhiQ z&tWSS8kdP;p_Av41tV6R4!e^%egH?cQjY*%nDhw8(&FFGMH;Cwbupb4BE!t^9g5b2 zUbvCLqi3#GdBp0!)Q;=+r2yDZBZW&?cQf;tcoH<{#=ZE0+@>^9nV;zij-2P&ftbk5 zgD}UV%_h)xN_XDs)%Nl*LBr&wa1RW(#! 
z?1&Q-4veXLh1b9UaJ3_M9oZyvzY@Z+H^D z^oR*dBI!uZ(FZT)o7}T%nod`#>~NA>MFjytZD&pa4!O=ZQ*Q!k(0EJk_}y`g zJP(fDBcw2Ox=|AzKOf}`nMg(?RqMz^Np%HCYx(q=c)lE0c*Hs{!k$`9ix`){A!V3o z+L@fz(TI`&R)IzCr1boIB;t5@Ae>r<7k3nL#t(t#upZBux>siGduU2&Fop%h0tRU9D?UZv=ht@tl*>irAx80NOns z*J}ZOb&HZP;D9_hYS~+6g$F#J^?M_e=NuuV7S8nk~Fmt+_~{k>4r}q-fp>GUV&- zeGG+3mVfhix^Ka2rRAP&P*D=%Rg+y!KAM^iKt>WhkE123?s}z0QaJoJQkQcIOCool z7EtjX$bA6JC2guKRn9MO0}_*zlc9C33kMoy+{1%ot{ky?hBnbl?RMo)Um zuv-uOaKE!WZJE1i@}mI4;SIB9N%dxtwYmO#1@FB#Vh#Q|LG`)0-cn!ks#l9(l#Knu z=wD&(cdz6tYkq4erj(})cXyB15k%j+Q5VuRzk9Ch(d37Y;f*4v=3*&R#Nacg6jw&W z7JS!6Jf;^wY$mofD6L ziFqnv=f{^9WJidjoo)m1Py8Ybn4rr$MEgi;kP}kS35*Vy4X`lAbSue7hsIxzV;GRK z8qq}#nw8$GD640AaEP;glU9iQHb-`L(8;X&+B4EbVKbvoQE)iyyK0uwAi*^3n0;EY z;Un8WrqxA;-D=~yU+(ym2-V5?vL{yZ>ksusH6%qYswx5{l)$vjwSAOXUJqAi0ONu#DO z@tIT#$Mjp#bsk9iOa)qrw|Tfoxm%4^wCn3}rzPP**sAsx@26s`E6hg?h;2U*wf8!8 zwG|wF=b+Rq6=r;sioj?Txu%OGAsdl%*Wc5)XZAkk7_aDQ{Vn91aH{XqHov16S3wV3 z<(&163~1DFOloqtipqwy!v;^gumwp7N*OWemx4RHyrF=4QP*kD785Vn05xV)0>{pB#{^Y265yvpC;o=iw$jyHk-7C~F+gj+6u>JjM;m!opxp%D`4(3*l3-dY z5hmh6gE(e4W8}mZA5#VX0fmE?vbB&5#(dZl0Ppxf`-$q0->mu`Fh71?8~aOYgI81QJ+P5?h=66WZYi)n}K6QzwV@ z&Fs#dN*;DW3-P_TIZL{fHN#O*|@5hiTyKPvTB5vmtv_gD_p!twRkfL6t?aDgTrnL(wd#|ou16Q_hP(cuUhbvb6wLW zxtqO^a=b5gi;9O#Zv6C89h7>Ca3%pZXq=KPgjjRKtIYeiAK~X68RVlsB0 z@C6K^^#ESS#}8$k>u$C`JKHQb0bP9xB9)$XGb75X5c#We|C+)BsJK)XS}VU>T`Grp zA8n|?_}9`|Tzmyv$4VrsFFyp{#gBr{NDj{|z8t?Z9Bnx6P?%n-xv8JFBrAmrk&{*u z;GFVc!gwd)%IbN8Scs@vwmLuwfF_W1SpO-`dh$J8F(b)kjAs2<}1UF zK;lGyIRuvxV@Z!KI2Q5QU{X}0STZFSrp!m@N@4PjOb*A*nI1)`k?V1jr!mCh--g0SFRAwBeJQH%x!aQ zxQwT|4Z-zW^=HdK>NV5Qp;n91|J#HYG!uSMRIrPkJ(WcB>CmZt^(CicqbQ;`i*<=l zzh_hHo+<*>~>dRc?LqeENo%iq8p1CU|y-_xgvfMj6~w zB)p9}lC1hOp5oxzZ%5~Xf?=8na640$OcvWq?xiGG=cNcKm(r&={;b<#5Jc6FnOimg z`1valB16`9J2XCK``K*IDTYvY|Cx9jcdS+DS7FnwUQ3Xr2q*b;#grBQJWCGl9MfGK zSB{KsXo)4c*-GB3tr$m~+t|7n{>7JG$Nw~~acOI){W?8Y3F6U%!Jn<^X;e&&qhk|( zx4ALg3=|T9ha42D1FozFC)4NkW#n9LZ?D@FCO=QRz}L7TX3w%3n+@ScZIiE0Bhy<+ zSk}yPlx(rr<;LrG%HZWy=b=f{?~jB^EAs}WHY?!o%?IT#1Y3X(8cU!}o6?NL-4a;+FKwy+&3{5|B58Q+}#4Z@O@K-FH;Qp0nF6s?IGqKYNzp*ypm% za<9qt&O8smv#7-ckzU~>alg}7`)uf;kXcWK(b9KqJ6ZO2wpG=+&xHp|x5Rz8e%?kR z7o&WH$&L9$qr#Y@YPEsON~@^#@lqdupSa^r^Y5?Q%I}#oz#t<<@0l6HM6X1YCa=Bh zKJRWJj&=xWIpK3@fv@m^U9flRuN-iZp#LsnM(xNE(W<4HMJja-cxaPin^9U~N&!wg25}tuQ$_QFV^D;Bh4L{_brl^YK$jQXLI?{IyAkplREg z9w!$T@NTelOXUCIt~+;n91{nGzbY39v_aojo1J#cDaNRi$%)1}nSxgWH)2Il`@Eq# zx7ylLHo`(AQws<0Ab@8bvGO!~?~s8I_0~$`yZi#1FP?^*lv{24-+vDWbQ!%Xg3e0Z#Md!Aa!LQ(kOQSCzjD@u! z^~}R1+L_|C$!Us9ek**8al=%}-2Qj=v=RH#aWb=QBppF@F)oF3fz#(Rd7*v^emT(0 z?<`@WpaC#rwWxO@+p4ESu8(wMjhTy46%#A-+LoOFT=UHezOoQ_NyDX1jg~f;SNGy} zq@mEIX#eBh$fgOg>129EzQP8-i+}FX7yX!+B@c)JdgOVLUaOe;L${;g9q7ybsQ3NR zb!8K_p7z(~uW~x_&$Uhrg{UHb2qp%_WKJ5yB$INNy}7PG5H)dCrKlSXZPn2vck8J6 z@G+Yjm`JWr7<{+eZ%L3HI%$4#UoZH&)V=W+Pm%I#(o(K0S1qGlgnVEC#z;EAOjqqZ}iqC$~zuJ&DG7-Kp{@BV7#_@$~Y^3lhF(C4$0 zW}3`#f&>}@3T(hslo5iQ86Q7ooiD!U7CSm|cu^6Xr2XV~GjO{<7DB%;21?skfN^pK zuRp+2QR|EENV)t5avnF&a7_0Lb&)2CKx+|#J-#LtPrCUuqqMM@RiUZ&y=D&9*QN43 zpI`oeEx^707u$cXMA6()!oHU^Hf6AeVJks%Hs9C(OZa<^085ez`|*aEU#?$2dDyM3 zOihPbAQicn;Ts8_qq2E80@$&Uzn+ltzjw{Jt)nTVSD>5BM}cOVyOF(_Y<>N^A-o;w zfU~3rfFGkaZ*9IH2(qX)MhXWJ!DLqVnb^TGkd4z>mg+bDrnKCF^xk(J+nAtC#uFa% z;QOOuF0o|c?w(5{KSdWdM3sRDz&C0)*5PWa{e<4wm|nhr@963J@r2Sf@N_&|JP!w! 
zu=ewFaYk+P#ZDN7aBY8VX2Hn`!Ey=1 zH20W-x1NHb%1$)bK;NHEgeWLw0BuG*Jk{fTAneR|X4VPoJ}dRN<@%GBCs>`y{~~!i zRe_PsVNo=mx3ub(NF2aE9BrJvHZW~fp{Oub5^oJRO{VD55SzKvO<(M)#UG!dua5dA z0#*2K;%((ZK$)na3Oy8JqBLYSJ{3Z=_+yn14U`?|p-37{Wd<6%wB7}`#JJ-;U5u#^ zOzA9&>Cp#ex}#3+CQrH^g@5u5>E1zU0QNpf_4)gy?0CmFVJD&JIHmh{>)k2*{M87m zvkeI=R=Aet?w`PC897RRu}rTkzf33&N)11pNh-4OGGK=1rO&@u3ITp;#{aohN6Yp^ z`gd+mxx)lF!y8g+@$@;y9r=?>mn5pB7*K?<$pvqM`?+;yu;>r z@2AF@Rt@DleO9`6_)$i&UcQpz+@ykNfRkc|Mg5|>d?#(FMS-S={+8q9j^a@hf+4X3 zQp^)>7M%gLEDJm{`C_oQ5*j;6WdzdCDmQf}#LBfn90ELR+Q7+PI8`?pr{>jmttujZ ztxE)h@~QRVV-02-PH!CMe@+Y<2cBL*vJ5gvZcPwv7w&Hb4|QxYAJn8E0jX&a79B$O z6{2X+`>|u2oNs*4sFBl7ZcJfZ@&tUQx~JTGLO5e5f;KFw+L+5rW(KL~yoDad?41ds ztq}B@Ly9oW0iz4%LCE%Zz%oFnA#QSpurt$Le`i0`d=6>@CXYUqoKN2OOW`jxMAKf-SWFj;;q!Lq`T}; zg`p~w73~E<$H+&)88AB~HF6h@c*UCsXy-DYhLN5>DYZFWiPfBAX@NK_^LF^lKIrVs z#q7D~EZM*HKcZ^h0FlMZ~ee z-h1gg&wDqs)k;+QMv2l-W%sOiFlp-A-Go;rJ&>br;!)mN?jhi5u;2{aC&5_)V2a}U z*tawB%MOI{3m0n8<#5YMx@?)92ZsIeG=P=;x+{Le)f5V~qxYtN$i#PN+mGAzdV%TN z4$0XFDNtDE)(bjreabtxGf5m8l^6-jKI!)R7RHv= zCKv0%tl>(Z$}`*YLqB`o!4P2Z9>BAfFf&bM3zm{E;~5q1lqp zhfNAmRT-n%Vu7k|{(-x*t&pBGobIjGE{(HoL4xP{b^ziaamL%S)@5eunlDV~BaE$6 z(B<^FS2R{~0Jcz&F9@hryb}u+9ItYc4mwJG+U2zzmO!4|tlFY-vZMEZ&bidjM=x-@ zetONX6Oyc%s>=@$eg^Z>JgBdS+GGBi$KZ?g7du;x?h}ynzrn!3b>iXd`Hups(4}9| z&AT02#NVFBDg32Ct0I>{j34z)|^M0A$u0WwhQ{AsC0TOW;*I+Ir zDI<>~4-L=q?e9P6`s>A4<;)n+*wv7sN6L0f$O`kNEq3=X;rcg||Homry12_N54$dOA=oh1igpAc6&qY>+j%44-{$`(WYbQTRc29k7*!E}|5PcVLb`uYdQ&^UcEr_F zcBiNUV|M;sr{?5Nzo5JIPQk8_@QtO__fMirfzw-vQpe=2u;`YvHLF%=TYg<7pt=sLePs_t%lF*@xdLv5F~6);>rUe-t6biiTzyC7bXzw6dz_}dq29& zwKDT?4<5bdEcq4o?fn}c&y2=kY67OGTydoOfA?uN`3nLl$|=oIW~Fd!txvru>fU1^ zJ=-2eyO4wNV!M6L(B(?1gcYVIJtjK7_${R@N_Z`Zv2Idhq?0k|0}AL{?YFmzG6SSj zbl2x>hotjjmZ3i_Eh2vz$zJ<%r_s!|{^3MWVbbz%ml}t7QQuBzZV^xKpxlpSK3bcQ zbvPzZOgTWywVS53zc2lw{Jo@>s2hBzpzMSMWYw!AUgnxmp>*G5*ecD0)qR(Tcqp4m zk~k2$B9$|38+4yD6iSujXT{2&Dy**uSGOuDbBlXJ2G#ds0sn*fuNW&-l?$c9>Wq1< zYM@dpp(ue0_f#@ve>I*Ts&hR@MJQ7kMB$U!InI*btWEFv_Lq&|yrA8nmJ5JY>8}r~ z=X6N0VsXGU9Z5})V8#Upv~T(0m!B>O)_y3TeVOK)J8y!;9*XAp7NMalRlbbw?xdRF zO*UVV@cf^w6gH1SHS*jo)`VGFD3g}U=Qra|e;c!P8E4*wq?rl3rfGQ7*BBNznJIC! zl->_ooQYo^3v+SaDWKZA)Gm1@lqFOepM*vwlZ20aT}J1rTr5&v78oK9+W>Ax$Q_Wf z3%XPAUsj)@A_z*o*EXkLukA~vZN8j+AdYhg%DZ~Y(zObn@IpctPI$pu^?-$6|C&QK z1s>W)b-4CcD*OiE36o$C&|hXph4sA`7uP(A2z%gg*Jv~KQLr!-=@2(H)9ZL`cZ{z= ziKZ@@7tTef_X^rW#aLl!L6?V~&2>Qw%N^0z8O7ccTVDlC zzZe2|dlh0Lf)KE0EmO7@9Mlh3AlLms84}-|camQNp2)N8I;jgGe6`FyP3uaiD`x0R zm+v;s=A7I=ML|^7vO=(jcm$>VT_KstCm|E(ls5Z`%6NowqmX*#I2|)bdz=JOt41SKYn` zU(8Zkm}JG=4#{QGfx`Heq$v!Kw6%JW#a7K^|ljD_4x9>&aXRhCh5wu-}-hI#~bE zUZ_Kdce&G=dONT$_d$Votdt`MV+qDVM~+~nMCMg2+ox<7VxsA6`yhvJA)9^d3$V`a zC;%;fRiRH>W5gLT*xXaI^5v}`fC3u+Z>6iQ&Alj?ubD%clq(%Ai_hASfdI4CY*=@H z%+HKH^%zs2U58Q6#a6|yq80=JaT?d6_!1~#iUu+&ynclcADkVrPkn8s0yQ7ya6-P? 
zAI}%WpVp$#(pV=rWL4#4LFH&u^QxE`)(WIr0)Ja(n8S9mfFL?s1O6E2mGyN}5b*-E zoktu(!+TZcv2w%wyQC4&f#APCZuLzi?pfWpuz9RkwbjR-0s2n;AKF(5514N7;& z&|T8q^}BtZ_x--#-pAe^>K_~jFmum!ow3$B*GbsL@>-^HCYZ~TjYh(>oOAl-KRJf4 zeS+h9D!pg`b=4~E>F(wZY02Ubvn}|*(~{@Nzbw1o;CNfUJ38kwm4CTYCp}s)5YP6x zH_h`JoNA^A&cK0O<4T{l-+6HuUy&A-dduE@O8@WK=8&PKj`wAS^d+lb4b#Tz@zQ(C zaxJH?(|?#v_O8#e)a>W1NCs@d0?&H58hKsO4~^@*1eX^pXMWh6XV2oHYq{f|fDl1H zc(TdQ=vP0GdH&GVuo|m=Zv77#uLr7vx-viDD^GEIFN0jYNR)seqI{n*3x9HT@kO97 zR#IiLv-0Efh|dA%uj%KVPdPx4#ED%dsr^Y*<_ErRp!vSpKcmJzs-_!1We?ef0HGhd zpYF3>&5K5hu2EmdtB7v;oi-wCOBt#}ESse_Gd}^eO7_6dyoDIhCEzwNK#*qdJCMAA z5#4lM5Sa2?6tw1>7%Y1i!@kmHYxeLmzI^ib^*J3@QW)!67!)T7 z{UUQTIPK<7QLSBo13XAE%CGvI7I++Cd{Ib9PX$7i^M%TFLpguxV<~#){HG3Udj1>O z;fvqsmCD$o%1@N2*G+UHX{`D#LEnXac{8zAQAn9v?+ z@}R>xNk%Q!B4+;QT&jA)+%@{tzvKqA5ttbv($uN)+dp=-6g!z=20-RlqV&~DS>6Z} zPdBov7Fkm$%WLF_=f$qy_`y`n?Mfzr2`mn=zv>lLitAxf^I7(B_Y3E~EHb*t3JmbE za)JQp`n|(-joemiOJO)_-_dpS zZ`(v30d2~{%JwgtV@5R5)RF-Qe`2z#SR%?;x)>(d(Wv7KJ#^_bk%VZ)wi*Jd1xgtZZ%B zNr;N_X=Lkex122PjyKxcysk=I>#x;u_VS-R;N6K>FEC$p;tYWX;}?P6Y>ar=63%6! zb{hVDPq+#>3TixGzpPP*p0$QiuYBBKeewNI5beuZp#1o@aHdf z713hVs+8$4CC_N}O&3(+N%b@!aq#-32YgLY;K(Lbgeb#+>|-A>)3K<}5$pAxAGwrt z6VM*JC7!dT7=RF?dW4tcd-r1W|?LMt4X=Qb3$7+5C zI&W5EKF18kExR*`-FbiK;^J|N>x4&ac!`=Nvx5GQbh}xiK-=qg$;lS$MfOU16#~m; zg6>QtCsZ~9mWTE13QGr>opq<@m(=xZ7n?8J6C?ZY+wZkg*w)Fu_iS3*wr+5UteoxO zE2=C&7ut|;MWu`R969MGWwIxE^EoDsp^7>qh7-#SFteh)u3;$k(u9m3^SIR>TvQs= zWu;~O`vf{vCE?!Sii5-Wh+-GBdm?^xK|O3x^B5=b*LRTRQ@i#4E-n|p^Y;CH3B`_= z%Ox+P16k#Xi*>H%2_?vE3;Cuz8-9C-aB$gKfSf)Ahn8Jv&G37R_n2AH@<;ZTcP4M| zi;rUe{D6e#TIVAX@!6Qb2SFUEKje1-Gw?x!yT&c74<;sFLwXVj+G4_=7I)$gnjp$I zgDXM@Cz1HzRt(k>A(`7Y_-@>U;+m@KohtF2%-dHD+67}9@}Yh7_zfzr`}#8`$j1*- zRW0^k^4X3V8W)Ta?nt`X*j=5|INYq?Z&FdnQy+DXwYlVdIMR0&+P5tq>RurIVd{ws zs|*B9rV?p=^mGxg&a)w9%{!%vy{3)x>Pi!B*cOUB@k zkRWI?rc?Xy}MX>u@BN*YUUyJ16?pHrF8D@B{{2Oms^FrJY{oL7tBS3WOWI7trJ zTRsH#rbi?j_uq{=29Es#1!v*zvPNVh4Q!I7`-w08Gt1cX7A3~H$N0kVV{-Df6?o(G zlcMnVrrmalF#!l2IB8MFpM+R$vxA^dj(+M=6z&7Ex(OBo2Oq~!9E5s2te~yw1{7w#g; zUR$DjzjD)Vn+NJu5U@m5^|m&ySzpJW#oMByO~;G)`rzB-ZijH+X~DVQFFe9ms@6*y zdG`*~&dGI+?KNqL?#*FR%;Hih=yE<2)3(JIq8>u&%bR&auN7fF zzM*~N-U{UraO-M*l_3b5-_yvv<^G<);P{hGk~O#8x~>__h4+>m|K5Y3U$1K2MMA@eBVrhR!+r}^(FSy))7S7 zTfk&rFrmXKJ+9o5apCFM=5b*fDi!tzVgGWdH}}_`hl%0-3!{B{T&wstdZ{IG#TeCpW8=3IzTQZSU%)C`o)Yj?`xO^XHA>^l=_ z=;0U_3rsV%!gEzUrFouWYgh^>7B*3hGt!)N4K+{Em5bK8XfY(Mcm(T2X)R zeet(T|4sYniA3R0;I5wRf_Gb)}wWyiE>(7Cw0|Docgg(zEm$?ayS0uOvOzCyV za`$6_W2p%M*_&qJzuUS)z6Prg>-`+mSNf#L%gR6;j1QOJq$WUONDb(yLRj;@hB$M+ z5j7=aP8qNr0chAOTgHgoTmE^n{oz;@o_>43rV{@IP)E@aTpCr#XAUcZ4>qYkmO+Wv z@abV^!r6_XL9Nk#iJeCnGM*$F02q-x?J(a%EYEZaaqpwOQqWE!V1;e2b|Httt7|bN zx?zDRd8l zOf=7P~wJ3TViekGMkBtVFpC#z(JA zO!gK$>ltsZ_CHE``7Mp06J|r^{UI5%-|iC>D1$Bv7(c&!<>yR~Q_s<34C^mr z*FtoK;3M$Ts~v)!+J!LCdTySarXk$(AsVbgI3~#9AKW2Fe!L-n)XAse*3zF)Jqt17 z19lgar)k$)fB(5kp7(=a@>2SA$h_^`fg9J*8#~dTvF?1k=&6tqqUgo}+Q^VzKQbFO zdb(eHL!?l*8Z#1Sda0r9WTd+<7ebtaA_dJOX#D>4?Z2<{sJ{|Szwnzr9M-`iOd&XC z-=009({cP*+W)1@t^M*(w~WvVNCAaH4G=%&Q#Sq>3O*Bl6wF#AWa*5P6PrTxyemam zxm%dTK<*h%hNc{o5M78T4U1Gzf_Q?;<^+zdKKv+&mq~qfjpY177wiZ^hzsMO?PfEX3R01X14WxM>v73ZZTAC4ebN51vHTL%fZ|J) zXnty2;)oJ_9|dV4h7qGQ^UK)#cnxjME1#pS_-!iLi$=3!BYXH9i$hcqFjR@l$j971 z{iJ6QNn7t=sEg?&BqONO-hBPAH>bq@YQgeFJ0Yf&b@K>hyv^qB@vVk7cpF?nCL zNp}wm;Dy6I^4mtrPn8jU(lKWqavG7ri}dU(`=s7-FSqXoY2ltNLtWV zK=tl&d?p|ezml{4xs>-2^rJhwe~?p(j_(hFUR++*>08ioI&Bn#vTtQ6Q1+8CsHuh zAmME!qnEgP*^pO1NsCXZ^mJD`R#^l`HUA9)Qea`9tNoV@{)m!7$r(Y3Tv8YPDBBKJqjq6Iia_+dT|7rn>)OJh8 zY(Tu`+tXo&A$8vNM?9s>Ax?0-jF3|Nk#`tD1{jpo6Xk1z=!F(7nt*|lhRv4rz}u;S z^`ZB-w62&M0W;}yB2lvhLoq09R 
z5U}8v&(}=KXIHST%tm)^`0q0rE7*kPTO)*-U5R|b6YKoo$}eV6xJb~GF(mCNj4q_4 z#>rk`<2WQmdT z&>dFPr&tR5jFo;X8t*lI$=g$7j37f%qnpctfc3XDe)j+&X0{T;fVsD)Vz*mNz}d#5 zTT^i2%X7Na)aeJ80PS+$V)M;gur@3UoNEF1s`^Uo$>{>Ur!fXPEDJK&tk^|q0@yV& zDPr!}fP+D0#$gb`i^ z#T9s-+;r09cQ5F|1V$$~`@>@-+6X(ye#)b`X5m6~w^m#Wftb*z`Wmu3c|1(9>yw3y zB)UU%(3bLYK12_A-bvwLP*?M6lJu=r_ZPk_^GwZh*g>gTTR6p(s46-+jzgpUJ2sML zU2V3Lt$?y66Dyeg8jiIfD41O~Bl8U%?#xEA5$|c&G=K;V8p)cJq=H=@z?rHiwh`9S3fT50{>-L?! z%8~v34qUh~E2Ct67ibR0;^ne_Uv>M6Y_iL1?2e;(iD(5MgL;m&`d^nWJ@EoYy_Yt1 z;Za_a6>kq6l8?fE1>7ndTkLm|dpf~G2lFt2XD|TU68yBK({C?J;3<1&y)PxsGCBZ& ztgtq~!cu)<)A;)2jsJUxx}}5`i?@*&boHL<3y%Ww&Sy`>F#!qo#Rz2xV+zCshgoZ+ zN{%GcJ10^vJZ7ftV3b2to>hwQBeIXt1hm>0s%S81tg>L1_x>cEMe^?@P7pY{Hz&gh zet4{Sw6F!j=68AXpoQSH&+`0GgYOAea}JGT&GBmTmw83V!Y{1*GvzkN-BkJ>+Y{xE zvsoPa5`Mov+JfT#;;G#qqsb=voCukMgu}zZvS)XkRu^ILbzPNB9Z8vLX7my?h9TVP z34<9U3#vQ2A|i|68EgOKrl8%-^^pRuzgx zvM*KWz9D0YIKKfw^V0|MJv0H!94g{B_W-fuGrmm+9anJJMuU}Q?X><}DTklSZW0ri zs@k5evR~7s2{8G(I~|W1uyeQ5R;0)s6M6@_&5S|@KRTVM{xF%3l_4XKa!bs~7v^AM zYW;0JR$X{)gU(G}${Cb}$Az;w_8;Uqj|om|&wlw&(kbKLJw{>zS#Z^hn~vG@om25A z*d?Px&qIfUiI^A;TYMbhkoNJ8fYk>0p5`XQNC+{x`FBpx)W}vF1P|Vg1`iFz#D(xY z2ZH!7%32mO#s$@u-!TS+9L>D9aSirT+H)pfeHAgD4l=XU{YU81A0_q$Dp;sf^QC!T2+o5zCF$!l@A z(c6vt7ot0XOIiPZ2x_FysUIHa^=G^iu)8jP1ZS|G{NRuIQIpy5wha+h?bEdNz%j}6 z@;QCTpnR;w1kQ)Ko8kK^X%ep*mXJjTRUX&1zTA4G*7t@**Xd9XHRV{pXe+A%+_UeY z4o!#XU+<%~rd$C)plL6TfCBBRT!HNWf=lglm_IsZKUc=5{QjuS?tG6x(CBtm5OhGX zuhv`grWAvwF@mje)yz_T-Xmog9fS35t$)-A3l{Bmai+rW~f!a=*Ox}LIYL0 zM5H>5i-kn}ZcC^+$o~O=pD){P=;%9Ih2Z1O9r=-)wQ4`x(?%Ms=E||@vjpFOyIlCg z$rUed+sFm;-@OodZ-+NUEr^a~h_BSsE*Xuj$f?wfeK{!R(*7~OeZSaisoJgrwEATV zFyH#am)ywDCmMtDhj)$;GZRuzHLUGLoi=TM=6+2owoi{#e|9_ce)e+bR06YzRWeuI zim|~QsZPNT{NO$uS7}IgNs~+bZ3EDLgYfD&0w#SMwqu2j3ETmTXJl&+gLpBIp9=_a zONGj1yHy><>drkbCmFApMm8?VmcYjDJou1y=5v7Xu|Kp&02zobk!b)^0Zf1XY9<@<1IL&uu&=MCe00ldbkuUBy<(sl(At^{%+Nn zMP9GL<4D}l?H+T7qVOS{yQpFq!Ahz?v>gmFl(L7EX^uB|YD^a_?�)xNOW14T&3{ zouoIcuf88Es=?wSB#QH}G&E8!h{qLeKc5Z#EcjUiOs=&tU}+TlJKvU?+(eHr{?q51aXN2>yf^lC8j`=ynVKGo15Av>a5<*8_n@Lw z*cy6rZ2%c#uP=>0EFv@LWOg=$__^%nTu5Bzcl0};r-U5r2iq6%4QpfHn+Ppv`|P5i zC|75rXE2hr+E6IGOi1q!LxLuhH57T(J0jvrtyTk-m16){P(P3e{4SX(-e#<3xOD+< zLr$c+Kn>LF#lfatT~v@AKlA++t*W?+pYF(XF?9n^je?11=)*4h5i8~}5Y>3bnc!v2 zfP`>Xs0t87-d$7LvWry0iY%X(&U^|$5leW06yE22_rOXcR(l4LP^5+KW5;>|PHX^* zBmxH~)Dd~v+OZum*iR)`0FzJv$nz?6Zl$nSc`C!|e=_oFrsx%K)rxQZ`KKm3z z06HP$-KXmhX;r%$6sxJ)0?KK1ACIuA0HFO92J|P4Irwfn9twH)N#fqDZ9HZWPcU)5 zI%D>H%CWXI3kf2kOb$-k?~^nS=VeCqx;@V`0ro@2X}aH3j>!y$j%rWDFjOd#TjXnU zwk;HStV~v=GSF`&4V5LjIGFWFjczfVi>+))W2F+W*jd!R|w77$hs$IkRFI?#vh; z>1aw#lkxiwuqX}76Ip`qHrVM3Z?6*lZvio2uLeotY8Dfz-*HDjz1a%%*L9=OlU}H6Qe!w!)!GqCg8#$_FO;X; z>)dFO2$2V)0w7(l;*I5%S*$BbpwE*PArI_mQ{-TA{4t8H`jUO2vH2{??OwYdt1$o; zx@M_Cw97SvQbhvtF5;Q(cL67O;oTHyPCOdnG-NnQUO=a9e!OgutM-*@r5*{;X3G)H z@O?4?L;E?VO@T}M6pw`_2%*@Kt}K69Hy$#|9sFVw^+>O0-Ww+#pA$(+wL;vn-Q0cR z+Z34N{WY`u0Xw(?VmK?c*>!dn#xc7qs)V~wgQtW%YOjj*Xp!9#GD*Q1SfVS6e>(eq zr=FNG62Dt`GleUJyPYKu`*a#%#T6^&I%-fTnzF{9m(9bZFyabxR5+e41M`T%J}i;v zwZOIW{Kzx--q_hhJOEuqQ^6rpe!3!~LOl9&D&LZipCs14?0RhAu$c!YhW@LCy?J+F zklk`$tFil6odtE`&Szau%s>xbyTG5aw{1Q}V5W(?oBV>*C%rexaG&q{if8~&E6wAX z8Sh7J z8-7B@zR%~E$v&JvC7&_0}QvVegA%K^C#GxRW#wjP6G=6fWRGh&}_ayZp7`Z^<5W+$Z=_vr34@ zUHoX^ob4&^$CK)!-Rk|lM~H{$QKn@1x`JRyO&`uK|a3iX4 zsa;|a?U*RLs8w*)pn+f7cHKK&;w~p`X+&3bFGVcDqb0xw9;29E$gFjx%m5Fb%EU0J#?F4jm3CWEiFMwLu~~@N(D8SM^kaGrp!vO z#rR_+^@Q*meu}tleh0Im@8*mR2oXXA@D6(H!U4n zIM6Dt&9&6pdF|+@iC1~LSJfvf=n?S4XXPS!ZqFpppUil%;8T=$HU~nd^zEPn&i1@V z;08qIu+t~7!DwJZcXG2_gKEFePvo8>E}A58*+SAKXd{%k3GCo^z2?_2Ogv7GN{KJ; 
zQ%{Kt@c3L(>~zeqk($l#=WF2a*|<_$@*6%D+Rgsy9d)8jFn>JITEBt!sHCWj%c3J& zS}p@b!o_NPit90cU;$bw(n=Ld6|APWKkv&tRD08#!!xD{K zJMSv0+@!JcFj#&!)<-$n=gTabLQpU(q%^gVwY{w0$x(rhwIQTP7>yR~sCVs@tuf3g#-b2g8Clq^BkBC$Bh+g!o#mXout zvS~kSA-<0@2A{q0@U?E!PTDrtwbNn$$0;Lk!U)38n~hhKYhtU6+Yy2b7$vUi<`|V) z3Boy%vb@QUG|W8aC$*Q}*(h=m{C!7N<6Id z_vDDd1JMFX`YyL-^XrE5`>B+N?aA}k1v_QlU4KgMm*_duYf~2YbDzu#c0B{Q+gqt< z6i;{+YLzobmI5|UQ&peH$LS@27Sk789%6^A>dsd7FVzCWP=<${iCy2XVQfO5!;HlC zt7onNpto09CJQ&a`#8e;Yn8$|Zac5>l(M=UENW{fjh1LPj@V*hQM6X1y0e@uFwYk= z?mr<8+%2LIaD@%Dyvr&XTpyWPeHXup0k6L2yzN6IcytG_{n@BAI1wrC;1m3ks3vmd z4gg|78e1ugT+20yiK!f|&nC!rQ(hl@ch6ceV6aZIaJ_o8rn_l)5-$B0qg*L6C=BR7Wt0j1)m! zbRiP_hp+#nM?SriZ#le~D@uY3j9(J-xuhROjCQWz>$}J}OuHUXgO#9iUf$`IK_PXH5O9wb+k2FPB_o85sWLVU z$>-vhZs@Mip-i+~Hsn{^uArMo#WIjtpZDT)LaW+io8dTo$S>37pB(r10Fc1(P;l$` z9M%Dga+YdIzQe)0vr|jl%uiiR2O_jViGXTete$h5h%|Y9S|=)(y*rcrpnkdjPuP}2 zn<v8V6Mt?=7*;9cfLlG)iBb> zPpr3rZjg4AS==NMVMd)ICZwspKila1Hb1Db# z-7(|-3JnyAW-hYxf6pdTtsqGLBQ@Ab_10?T`!_tO(IFZ1*orjs-IDHd7)@*)SbMG} z=In{4SE_%|>{7Ipr(Z#6Th@}*+~sK}vpvWRxzKU+lzv|kUB6F-uu8PhEz(x7tsM(2 z6(M<_$zC|iwW%>9Swvpt$Pb64GN}0##cY_XwRUV^X;VU%F&(}ZIXpw5kZli>DR(dE z|73JM5o}K?V__>018FK<#jrf;m5DtWP3lxCC|E#(hoMt;`t>A*l{H7Da=Buzo^@XJ zEv@9?vUHt-Wu7uoDvDK0o%2UB7NNtZF>)RWolOIy@BETu!adp}f(<>tFA~vuwpvPu zo#cWDijF!-_L+o_?W+dNW!gP|A2PZk0dar`djYL~!`g*!R}EI&w&p46du{%e!a#!AwX$Oo`3ys0{lon6VPa3=?a>2?iPp*!FVj53euEhO zUs^g|k0jclQfEoQF+^f+$KeUR|5zUcCS)03=JH`R1|w(m;BEGcp%#1Yr^@C}%K3{% zL1B>angaH-Cm}@rJH>9+)GAQQ|o>`$aBkEf0GQxRHtZuQ=8T>Zw?-r)c zMWWB1y!~!Z3P-$Sa%4>pTrT4@9M8XgtsgCG-0VR8=9SmVN22r4Wh0Dl#`engGK^=W zu!J`==C>Y6oDaueuXQ9=XI4lEnWe~0C%z*DOzuj0a4S+iCL^QV*vuWBn01fxUM@?G zbbrK8y3xDpat95_^BSd>{9JFeorMaY}Ev#0c%e%!2V@vI!fRauJy+l!7l#lC%Wn0U`I_ zX+qG)1gSZoMGJzB4x4z|aJpP{jtQi6AkfKjcDfLtNP36&sfV^$U6xcoYLKgP{})b3 z>ESH-W`lKq0)JvAGNh)!HK%F_FCsGHz@?>K>*%iv!6^0}4D#K8bhRu~fpWp{b4fqll=byQUwN->S2LtjQYz7mi+OCr};Lz2N`T4+c04ZaoNyuJ4lkUAP>QR|tiYqV4^Bz9Xe z88#Fd94kG&pY%Ux+@MyIgR|-iEhv~1);Jz1iM^ImUB%6rZcpm1hFqN}Q8Rp-bt(3_ zyj)`>w_Vv9#9#P**w>|G-nnS>!J|Sl>Lan;xW0jN4o(> z+ojRsKIYn%>E+%a0etvK)o-3&(9(VJ?d@WRCwm(=p@i5?E8kV6;kNQ#Z^FvKao;qN zv%6%7DD1>jrvzd%K3LA0cO-#rKlal=Zut zwst@4{jUl{Mcp+0>f_C?#6T_Aue0Raz3X_k5@Ar{F}bNd>wZeLRe4H}4Vi}nCZw3o z@7Rq=91KZ}S>b@3;&1ET#y5cVwdI0$>ix$cAP5N&flIT?>L|Z#6tu}vJKQ9l4p=V| z6!on4`_r5;5~l&Gy&9HM9Sz~8k+cdiR^8uh^hgl=$;>BZ6b|@L==M37@3bF`ysbBH z*lpclZ4!r2N%Du0QTBAO<^MxTfjcLEpjsTmPO8eKNy8DObqz?8CNQ1w3JC3 zPM|b%6t>ADeUAdmwqJl|;zfVO4LCH5#TZ&N$3#HTK;WxOqI@B-)2$qR!NS1#7T<&- zo#J?gR1Qibj!69$v(#hIF8X5xAuR~Mf!H>jB^Ue;mW$-B3d->1;h~AZqFP~6#IUVZ ze>)PX+E2EEY_k_2`wVMVOzzD^T>3dG@WxevG4g>qj7a~E z%WI?}(ndg5!lOdR*=!SCNO5l7gXCeySb5=t96RhXBl%ZK0@jJGn%i;dXTR+|9nOlP z{mt9LSNhMtWSswL@QNW<+XDcJA1Bgn{2!s18CsDL{+ke^E2IxZLgM9~#1EWWfkfJopy8BQx7o!9r zgY(nscxIL}Xbx~W(XG;{xL34ECJD5qDS>+jJO6lA^pQcKR*a6nt;J?!6_E$<2vdJ> zu%YwIeYzjede>PIr!cHLZ!p$1W%Zut&%?hiV`AgJ5Xx%)O&~aXHuSWhYVLM8I&H>n zEtMjl2E0EHg-4KT3Or?XI*mF1QgHMZu6X0}HZVeauFoa*6Y}cR%Y#-p{|))1_-4JJ zJa{=7w0}(ZTb6z+{T40qNDRPUyT(+Kc2jTE`2}%}CfjZ4K!g~7Te~aawTl&Gdy7Lz z8$t{JO4n+M4bVn3E=g^*=YnQn7$zoPZ`T6$-v%CcHC>|x1sV^<9dUzLne84z0Dd-O zw?CSc64!YHfk&Tj*ZoiDwhPN=8)ms3aHFHO!pA=mQ(&^tVaWMC!@>rL;CpoJVEC}> z?P%aHti}+-rXd4Vf%{PWd$aGQF!YkHH+}YWs!hO@3`n_nj~RL{^PRg6T%Yau?&N!s z(RU7qs%B=Z*D7j^rjQa7zp|O$bDAChuNL5#!wDiMN{++>e>UThcy&=$@de1}-; zIJNlPhrT)#4!bRzeZxF+O!?V-OBOU`^TGt8KHa>zD7hNAy*dm$J+Dv1`q0f}CwW+U zj8vuhId;2<0wZ6vqu*+p@9vMyohb#^1yf3TT-pnf5N`!g2%Po+A{AUR>HeqHZ?EOO zaw?0_on3p{-Z&{vrbzMqY&XCJZy|X0c5tR9ROM=dIbdZ>BHSGpTh~bhgwOBFha%!x zx?{xo{Co7oef~p5@%tBlP0dEa5fkb+-HLNg`reEAdH)R|dwF`g$blP^k>uSv+wG*$ 
z(#83upCLFLf=8rO{p#-^rWgu9k>&oNZr_94%_TtrY-PEtO-|jJjHfRZFQ31)ZqQ6fk6s!(U! z?i`7rcxuL^%{=_Ru|#Z4%7Ysv6wW6_-y-yBp8a1m4Oou_qiovAcCYuBvZ5tU1b%*a ziuR`Q^ThDWWiuUet2j)Q)w!>NK`CV${l;T7<7w9->8AVu&q46nbSl2D%ko|wjrUR0 zZD3$ba9J{OrJU%zf}kn)`#Wa0 z7%yZY+(aVADR-2yG5`jHjgs-j*~YXM(OoKF+HJEahaasy#6xGwo^uR_(R81y#7*sz z>_|>IGGa{20FI0=yH-1Xb*d^4>;xmFotQfajRLpBnP~!Z{=QfR347bmd)cqlyEI`O zAm%?k<)Kgf59H4;Yi)*8Wz&65xzbw~`UgS(t_(aRWLe`lw&fwfZ0Q6sq#hNm-P&q0y7+)M3}`2i*6>UlHopr zL&(}AoIegcXm7>&+JY72ni2f>aQ$uoBP41VktfrAb)+UBie-%!Nn)qydGX%OeVYL_ z%u1@LOoQqpmlNqO5l*Bj_?OlM9|U~GPT~w*75rz#Ev(qPZUO@cCNQ-8*}Yd2?P%;` zy7lVp6`66F?W}_b0071<+|NI;g;l|_h7rt&zJt~Aj3*7K1}qB>N9wdv?U&=qE>QO!$sc2Wi!RK0E9qx5Mm(1YqmJ=n$aqkc?dGOPt^qdSBg~z><|;kSd+YTqAD=pNErheBQ}51V$F+2I|F_Zo z`|}_!TC^dmv9=jMka_9`d&HNud3ACMmapv=7DYl4;7W>|gE-t@AW&w&{i7ZeM4u90 zq`#IRETaINR zT)xQs$x0;UjOELjVNeHMW5=IfysxfpgWFxs;4JTY@! zrR3-w`@zr2vLs(ik9anmH~+vm;N4vQ*{lrmyU=U}05UHg0!Vztox?Pqaxr#TwAP3JI+EEh2{_^NDgqBzF(`{dhDX87c-ojc!1o}|ue;o?qd{t65znlSfJMM~ zMzh2y(mr&TPAqf-qEz6wd@@?~zDZxBH0LQ61A}zs0VR*-b$ZP&-iJN14pojK zC(KRN54fhixnS@1SHeueP6s2EilvAI(G7;QJq_H5cEFZ|YS<6`)INrFqP9lU{(pr2 zMF!a`_mYdg{wJnEUK1)@CB27!*8GymKCk(08%tQ!F~tJW zsw*LuoJ0Q~3V6hEx7qo6coJ`#?D@N9aCZ!?4B^M++|B>6@6hGD z{cna&0*6@f*F-2W0iDgH6*sFbLYsG`=WCQRffsRsXIp989VG_}x{oyy&}78;1f{oV z9kZfx^t+Qr61uMoZK!)@SFKW?dZg^g2fO)Eng~1)r|1FeRtxWZZN-ld^$O;_E#e=s zYOy|2PZLpRH=e3J%=^Cp4Bp)4-h~5`BDRle8=uI@hX6>c*H3b0v13Hv~3Tp!xBerC10a>GjDkz`A8^DeFj58_! z3KX$}p<;0l$gsbT4r@boju;r+o7c5}ZYcQkaR`qKnle>kSnylP1^#oOS^7Z=S<@5Uj;mZo1xLlK{->OCS%7g%1$lUi7)2>~Okf2mT0-#gM~H%?z`U}DO^fY$ zQj1kKABuZ5DD_F9UrggU3ONW0m)6@Gieyw6Rtmp-YfLrQ46qw;Ybclo$43rojcxzE z;4bGH=0O_1BWC&(d}dy@M&A`S8cu&0K%RXn2>PdY{f`{AZ%}@xtyu>|%Ge`$6r{Xo zwUs}R6@r(#@@?v0qzt0|=AJONS&?{OM_}hji_3OuqXhRYwY|daFnZ{kD|H^6${@=E zdq&`o)(GW-G=EgR={Cn~=)5#72bS_KCV{Hn?xBl$h0fE~4X<8}kz|8HYhGq_c4vF?pffP-1N=(H)njV>s>HI`;+84gWv_mbI7A$*LJe0rpha{aq47~#pIOm`hNH(Mbr7^ z?$zAw5mrxek#XhANIV3NMX);P6BR=IGi*LZqhHv9-$-S0VsMGbOT>h%y4f~}_VcDT zJS-LgV)=->F)9&x`#WlC>6|VaNLd{HJF75Hob;@E9xdIKPFw7MgH}%*S8vatPZOhM zLD+z3y1_H|LqH1h01kbQE%zK?!MST;YV76rdJMnsa&|=Uhq{O3Z19ZNRGH-2vgE~3 z+~w_+q_{uH9hU`}Ri8WTy|xsquH7E7Nr zziEbLf=JLvbz!CQGb_GdDxX7@`K1`a!sVfQcf!HT*B9@mx`|&Z{X>+3aTT9k6noD( zR95K4t@n>VjYoWk#Zb7V`eOi?u29MNRH2F5pY0j=$bnQ``Fb$ z{kVKdT2>c|2!U#87RRe|tk~|}W3~5R{B-7yIxRvPQA^!3}f*H~};_X|pzv`Z{?b6{)Wme4n!bVhWKxIJdKryDN| zWBgtp?A9l5Ja;KojiF30BJNVy|Aq1FQ$+XXWCY>=c0u)K_hiUT$08F@FobPn7-3Yr zdjA@W<*Yjri-x-3zj?S5hnXbe3fnpQ*L-UAVcIs|?X`kLul$XK;=3uPpsGm05X`kP7~{)bJIs;A zDv!Ju8uD^dv*$Bkp0|Pl<8R}%to2OgZ*gWG(AB~TclS zx@Y4q9h@am9|tdkjkO|902A75yytUvMq55oWW5GzjwR0eHH>@d(>H7SU0I6D=Tl&L zU}OOSg_>c#0VeOnHE&&yfR(V4-GIf-x{^tjcsTa1G{kvHaZMb6>o|jz$KMnPLqT7et4uUyx(Cb6+)HPPsMzp`WqpuIHe-87yGnvXR$UE@ zia!4ThUhg;QFio8(My-qlhC>I`%Toh`*ls%fy!xVm=KAxoxq0UmS?%}2mP7cLB69U z{fQ0H(|y^&hb`~t{dJDmoK70u;EOB>mb?mOx7ZwA@d5&Z<5O_pis0DHt?GSlB=7Sn zZiMJut3TVTmWAQaQi9a9;J?4d87=3Kx|@2~FWvLy7w)5eNR=zjc0El${!)!Z6x};# zu}SYwvNu1=u!1!);dpE6)+o&PUV1^Wxuo&}jmJL=2)39B9>={o!=B=pE&jcRf27@ASS5Nsg+m)_T7DRRqv zxZt;?I`&5Aw~OU#Ry$^IhZSpGFg=~OtdM|ZMCVn7mD;}R)Qs=z*9(@C2jhZ`NB+PW zSK!{7qxWS)fwuAGaAq(j8e zq@(m+q<5ru1f)Zd-a)_+YVsbOah~V-etrA8_`{WR&Ms@OweEYZwRabK-5ykKb-_6& zeBgglz#x6GY708;?Rng^pDW}F+qgo@%fq|`QzfOY&RQ?Tec-1r9cMNpQ@qdA{nJT6 zGJo`{<5m$x5)SYHhtg;7P_IU@|B)y!D&Z>_w*LN6<_b`wbJE(kDBuKl*T(1JHK~Gx%~AZd{siZ;)e< zTeMSY@kBQaRjb#VAsV<%3jbM97P!>d%nCT{W?{I~R@^1d=f@IETzat*w%CK))M~bS zjCngWa%JwZF=ymrBy#yf>hWL7G9U%=om^kF8;IeT%C|qowIz4#G-ZF_joN4m3q;R4 z!p}qk4_mLc7XNIkZN;7{z|k8@114b5UjS+O*NrL;!c|W>_uEVHA29ICXxzHHg8K{1 zh-D(Ucg+sKR>;Biu5XESbv5WOIn?7rhcb>-l4jKXbHjvvZG$R0@sN!_B4vL`!MBYV 
zU&^1htZpV3kON=dz>7m^Y>HGP(Yactu$^aDYLDBeYA@XBp;jvn1}8bL>Un&*FMU~t zdlY!p?|L|iEb z8P;lDY2H%=&f)4&Jh7WYdYUrznA6kIrI7Z)PhjvpVFGfNP-4sx9kY_+A~~4QH>jI7&?smY7|FcM*~eEA&q&Sx=)QVs9u6Sozr>y` zERBt(^X3L=kd)SY&A4KRd$6n0@ato<%33d7;W!b~l@6Z++(}u><$P?)AKw=*v1P58 zyhW7Qs{O3Vam6B&p0vy3?TLn*^3{riMrmN zn5|@i@c{J&am)zzdnEMLRw$1j=3;~*fsKTmoNtI#k34;yF`jb#9bSJ_aJc=K$vS&3{ILf%A-9nbe}DhF2qbnxEXokEFC9 z*Rfe&$mq+L1jTK-YQE^#N5pDw$O}&@gjVIX3fY*W63!cLDs?3f>N`H8KkKz!l=v#C zZ<-{aZ8pVbw_3h%z&m{t1l;ugZCpqwVC)%RaHN-s=5{fy;vlWO<3`gb->}o!HR(o7 zDcO=r#H2QePP)ndaS5giLiE^cLK^!%40VO7ZNVb!?DrdaLg@m(X9VnHIy&}Su1s_O z{uUcEW!dgglQ77psKj2(#mjh<*5c#}Uw(M#LVdMgFSx&7{!pb|CA$o^LY1b{0_!@- zyD6BVIsr42Y_hau5jj{8j&*Gm`T3X`JjCktZr9KKs=MLpYEMXXE4g>K{)}eZAX)h6dC3zorB$WR7V&*FCP*GG ztJT+;H$b4>kM6H^Lzrg8uBSRu0mq+hA{MZDM;=5?ChF@< zmj^X;(4PyxY^zsenGOcJ#LW(6;BE(SE}oR*jkz zS%?YrSG8Jh+)XWpat(p$@oBVaK_^cwZYmSIRRC82)+bkwZj)`zGncgL3~P;;XK5BN zUCWH2L?xXsZ5}D4Hm$dkt7A5aO&kPj8>A`PYcc(~(+Qr z?agl;jwg4k402KXvK-w^``K@krh@IFD}xUdHP*V8Y;Wm=_hcyi43G9->RK!yQ4&BX z5jez;Xoo@dgR;+3VGm=ycfdKIau0p|POoHsml7i9N$eZjaQ-BTxW_3r6DrT$Dmd*{ z$$(=&Ax1PEDUmI@t5U}`lj#QUbp+yQer<;&QPZr2roYnP(KuI|bqQ{S!qBu82J-gE2pEmF$HLjUdPV z^mg|@xS(mrOWvb2i}f&zjgU4N7eazgN%om`R_>3RMS7E^D%jBRVewD1Mq7_0^8IXC z#@b=r5pGGe!_F?QdlEa{{D*%GspRTXN5d-nThhKY3dW*u9>@&+@e$g7CvID4#Cg|P z=-rO6U!do#^jBj7_JYZt2*Y$;*)N~8W9e$b{heNVPEJM4>(XS|76&13L=b{N8V-Xh zDi&4p&`<`?uQ?39Q)Fh1$zi2uC^UtcANHMOpWFc~vb>yMp(LxNUT;!k_&zzG?L6mL z-7SS7EpEg3+w#Yd>2NnOPEr~^IjG@-g`~?29JVqNg-K9~F=G|J?>lm_Sko-X0+wZQ z7jDuxrphc$Cp5v;7CG91c*h?D**?9AU*jqgBpW|h{8WrsUjP2spsKVbN5nyt{L$KM z(h5X60z7qj>E$ssRr@nuz&#O^@dQxAZk<&_mp@Hl_O1OvSzpuhHBLH%c_WFzQX5US zH*famy|Ru@f`%}iJOO2kIuC+${ioW$l<@OpGEofQ~JaI&Zf zez%s3ss{I%WLK4po#k6@>@I{vI3O;g(HnR2A)QQ6Xxt~G2-URoCD;CzhVIE#(s zTe2*+(6k+F4_d(8d=|wwq{Dh*Bw3r**II`?*^?@)E!&6_h$foKFsM$~VNtaL&KlyZ ze9Cg8jR42(aD{v!WD`|_n0(y+*fsDq^@b`gR~~YVvNKq3=CN3P^e!1$#$;9nu6+!g z6!@=L{E&%LRFW!SJxI}GF^yn1Nyf_vsH8EQV&BZZSrEe?X~)7SlnJ$KUHVr31(2BY zHy64gJD;-N^4Df;2$ewkFjw2e&rgW$MQ0_knIYQkQFyxRCy_NBY>EK_;@rIyS^=$T zk&F9XFj_Eb!VteHbWFeJio}!L;&8BErGL=<#J5BRdr_n}-}nI-0#v z?u)Aowp=`BUeG%11P}JOUqMevG*Tw}0^D63Xo)DVVoev| zsT`~(iThJoL+b}K_7iU??&ZTmVp2_4GxP|qA*~Oip4?r0c|7$%EatbR0xIc z*x2UCKw4k)1!kNb^LC)_eGQ40dPQM#M3Bwb5DSXad(lscVR56LGjv|jTK{hK zO;<+5rsBq1G`B})2)naHpBs{)NEly8ug2rnPRcWCv5%WTFf->;w{x%Mj;LyE;*DmDcTU za~cv|9@Rc3dq*Zv8FS=POlY2e^0%3;6H`0X8Z36X_N>KgNKQ6uq%u0Z7iP9DYc$Bg z#i88&j4?gJ8#oA8C8lo)4%YSo`{{VIz}Ff-0DrM|N5xo*-qn4)5Ea6kbqlF|AdTl1 zH~Sm(Y&wciN(W{!l~4s6lU1zx;-<`%fD}Wcx`<# z;xeg-^`FvHBHvFVb6qxb>SItG7aZURbFdUhY|^V*?fvzAIX|tNu?O1T&z>$9ZR$0h zhdam2CC!iP;nN5Z3sCV{CnsG-OP?+`qC^Z>GOBm2GW-@Nq|dg$@EGwkA`Gqox38&L zMHk&Dh5)=s8@eOs@i;y=uPdu>F0AaxhWZ^ls6=1q3_99-R!KSAW&c)HELZ(rLW67l zaqGBz@+1wjbO^BIS~SDHV25nPXhlVdQ3!p&(UP~>IZeF7^9O-2ynCk({plBkY-N((<7NYBt*zLg@&Z<1+xG+yJJr5T!Jv*00B-P;wRoec1A4E=q ztk6slc0iv`a?4^C8kfm7Xg&M4dg2)ovdL?~JQ1mPIQ*X~cUpHUep6D^(XlB*Q3UIa z__DTm(g;_VVKA7Jp=}5Ya=7Rf?oQQ(aL}a)S0thSG}Z>6pi7N8*6bkyfy_!sKSVBG zYc)(N+bYw=>XP?V9|wRAN*^_*kUQbEj9+j-qm11YT6hr)3ndT0A2apS^6U zm%cdP2Z5{dIht}FB^OAUboXM2C62gn=IH@;G3bv87fFYLIsfNp`36&y>sv`nxS($W zmGAQ{xbwft7=$U_(|keFN3FausN$e6S#;X#ShQchxO|pItl7Ds`mjM>V{kB?z?Qx0 zVdfS2$VynSoa}*=Gh!!q-tVdR_M?ZHoY?gbBF2?R80q6}-$=g>U-BV_j3Iwqe}zAS zMr=M$YN^q_F;yc*(8XD%Q1hu6@^Cx2t0<)K=`k1M`g$h=F?W-EaY;i*Ai;VttxF&d z*z9ywcR)*ePwLIdUHyH&2S#p$s<`amnOIMHrZw;jebG^c96v~{RC6*$IM3?iRyT{! 
[... binary image data omitted ...]

literal 0
HcmV?d00001

diff --git a/images/New-DispersedVol.png b/images/New-DispersedVol.png
new file mode 100644
index 0000000000000000000000000000000000000000..ba2dc4c3a3e3ad2bdc522d48597a65ebfb3b9f04
GIT binary patch
literal 56418
[... binary image data for images/New-DispersedVol.png omitted ...]

literal 0
HcmV?d00001

diff --git a/images/New-Distributed-DisperseVol.png b/images/New-Distributed-DisperseVol.png
new file mode 100644
index 0000000000000000000000000000000000000000..bb96624e048fb71553d39483898c1fdd1ac85f62
GIT binary patch
literal 65543
[... binary image data for images/New-Distributed-DisperseVol.png omitted ...]
z|0xIXuC3&IzLw}XOy|)_v3K5ZZK%{z?tST|3e9?Zr*U=I1=+D-yEU6a8pv{lV><^L zh+mT5aSQU7^kh&+(bM5ou_qiY7MWz%%}{Qf$^_a$&J{AwNJY58>W;^4sL>9z-c zFfnyhwU)!=U?$Nnv^LXX$$I;h863rTSLhTp#ym0x)QPNp-e z^?nL!)sfG?%a(R9+`%Kh-@jx{N1DSgoCIH1Z*Y(NJ#B1UH|uYfSj@ zFSj~EL*6_rEG^4wYPfO@A+{z?CiKU~SYdfb-mhB zZ&Yipj1X5{=2tL^%@M*y=a_jk5X;LcS)1Gq&^7r|%@mPzl~So`k`$M5hR0>aV$fsE z(~VlSQbWQwuCC+V{uO-K-F|vLI^V$C?ck_s=D7Cv^cgga{frsGy-kKHiKoYad*F zV#luD*Z(V=v40W8-86;O-;>LWr0gS!+uiIQJo%X*fG6Yf09D4$&4Q>&C{WVs->2OV&2NH z<{VwG66jXPF>YCnuh1?pM}!Prz!7Dt>3N2LEs-J)6ik5xJ@*qNjDx4@(UehDC1uUX zN*InMLyT5bQ4#q85_oXVEC_(kRnGr)HJxVSz3CXUhR@%{rBB@Gp*_^9&r z(TRz@iLQQ#+N!!bRuKvylH#$N?@wx z;uNuQB@j>i@ilzttxNM2Uw0rYsIpXCv8lMQj<%2UVKgKg!WEh~;E2v^0IfhH=xj`K zv0PnTZb7%vlG2dp$g&4!%Jt z{iH0S(Mc+9SgPscod@-K9DZ`Pq3hIw(yq$B`!kWm5ZEqB!y%mcbL5&W!n4lgn6o+A zJWxN(Od_5ECd-ak&{~@Y2Ik8|TUfJEF5Eodso<)7;qyWfNGT=oOg)-e_SN`hn zxrU#GlKtw{suiVXv(3mAY{H(4zLIrmd4xVX^I~|@#y{!Mu9f0)D`kiK)hh!cZ$Q69 zP5`#1`;Agf*V{Q`qR zq^imqzuD;Sc>eYKJy{Y1l{_LaK*yWnbO{YW19W0>wZW52*XIu6BziT9TFMW$IGjYn z4@9-{u><8B|HzK~@6}PUEggTr#LVd4HJ>#Z`p@oPU=Z;A7@F?h)FdGj6VpG;2}5RK z*4ePD)uW`fPHuFIEC0l_?QE?1S^ z2jd@9tSe*f)Vc9N!AVE8cJY_}mo5$bn2H+xsB=uM-ruMsXXU*4n36TsP}8}|0TBr8~B%95g7D?M3@Icp|*xrt*ycANt4jx({obRE8NG^2; z$7M|x7Eg?jf!?SvTM;%<-3>CvSQ{mJI0`#!Q7gE?fVJ$05}ovikU&C}Q0y0fy@67= zY=$uXyfs~*-Jh@fc`R~PvGxJ@7{PQ@kriiT5gX?yY9V6m{JK0`?UnFkk-OHdP3IFF ztF_drYG%qVr3Bl1?0p;nA4;|&phh9 zQPg4}aa0AF0q_iALIlD+@Oa|5w6mcjhkd6t1(hT_V%6oH-%Fu+7eDlh))(i$X(8U= zfR3(b&P34dDe*_=xeoo&35ogmdY0^QB>!1()6OYb8?WMALk7cCbo5q(W?tRcMj9k! zC1UpqH6zi+7(s#vKz-mL`nhSurqG5(1rCf7iTe=w^>P8|QZLBFL!ohd!2HvoO|t-1 zpbhpYs-G?DCg^`f=?m^6D%D*|o4+*V#T$O4sY`b#j`z#u4qAwQ|9KXig;@n;&ys91 z2}Gh$xFX*jUaOtDgexu=dDZT~`Fa^;)ull?6N8VwRi|E zgN$(XfYnC0yz=r=RxFncn2fEE^3v9=>Q$fMEUksAGtNH09;F$CeEm1?bZ6|oFRFJ zLn|U=9pRMWP0L`q%Q|%>j#(N1@p)rJB-rgZ7G5LA@z1`1{@Vmy6G!ou)bNhi0}O1& zco@M@R^%;!H}4ISERQ#3TAD@`vtAgB1pc)42jo>;)Uam#_DR)`Q_|OVS29UqfIfoi8lD&Ldi7mS$B^ zg+$gGw-H){iBeWD4UIYnS_wu5r$B!CC_Z>mLJ|(?N)9W17y}Em1z|x84Lx;6@w6yh zSIUBzoFM&EVIl!cOpCQnb0WNMzR2?OaiM!mh<8h~zYk0IH|z(KH$_~?d*sWr@?d|j z79F7nh|5LnS8>K<3+kzzHAjxC>kY+zD&~KstUQ>uwl=SiJF%ZaicxF&hddq@Pq_Oo zdxB-dcYm!{bl!d3z;e&SwjS0~VR(K(P`}+nxm>M}4Z{>!ZMMdIi42X2Le>)&5HGJa zY9OL??9?M0i4ZU?h^iK&ZOuXi6P||nqb`LDH+e5(u*$&}xrr%>g!3f7z=PGnIn|l4 zX&ALOoGw*_Dar8#>f!Udud#N&KVkil(N!HCQIVl+8}F;2)HbM;xeB!zr%(dyBSHTV zSw1obp;IUJ_!|;<&m9gejpU)}tBv`8^zomJbVYu-6xpUrd-TC1XYD*0bP(dW)fIbp zq54E@#Y9k9TAJJ5p1v2@;VReC^7X(ug2U&3qw4`v56uX=G2T41{3%drACNw;O~-4A zZdpA0q~bCWk(4@K57V*%3a<-h1`jP-TJ=z%@%A-ke)u^dg`y#m?7?ysoND$?yXWMN z&h11T7#cHJPwm7bDI~lo@UMu8bDBPI%x2$3!=xA z1C^*INrXGV=pPR8V3v{kcRw=#zj5H|?tbnhazh$i3=VH_I*Y5Ux|)=Y&9!G_qf?m2 z^;)Y~)}kxPTrK_a+IN;2MusWu9MSs?wy@UpryTNj`TV{XvS|Ka}8& zH`-v`AXP6F64n$nxa0B-;b_Y&4JnTsNitvGF z0!B!E7tOKe+C*|Fit=I!ScE7z4vnd;gd~36yNRipq2%)5ZS0wiNTVbtN^Ws9 zSvRh?(k786oMN2#ia8?jSvo19`LtNrS4#Cg=*?P+_ChG(m{lKtqam)apuxM{@wvPI zJCh5Z?<1!~ISr|{ar~d#K-bpRDq|(zg*7xrWw@Mc?Tp-3^bIVglV$ZxtM!w3g=BxT zR(ZO5$b}CTt6`$tSdyS@Cs)aKnwHQw29?Iw+|(Er0qD#!G_W6a% zT%t_Aka=vZFg#o3#N1r*u^_?o3*VbsA>M3mPQ@>va<@e5iMJBui>9_jrJ$27fb8!z zk6I^EJ1t%N=?P9KIe$QqaG5s;eWmr&t%?{@dT*5F|8W5bORSnM5R4V=nJ{X81Y?lO zvmY2|q0P?D7XEL4O1emldQuuPyA*|PLo_Ynb@j!y3=CqiV^sB~cmI$dg`C$iRcQZX z>XC|eSX2=Hfn;^SFQP4#aOA~71QrP+j*bug-fe{v|3`wN6^6c?x#W8d69Q45W4PHX z1N{`LK+3!l_9&a4{nv&KIQnzeR?$?Ikh}tG>{_fiu8AzE$i8$dEs6!dX#6c>x5&gg zYFP;}^jSxwI;(6Y`(UZ;wL&tONC>akK(YgK&$dgE0P;F6dSpmua=532y*-m^>z5kJ zVNz!;nh+BKq8B8W*QH4hFUf(~^ud`zhlr!FFhVMTqQ&Dlzq-15!Sp+I2og5qpZq$d z(`FgniIz@rXBM1`kNpjHH!fY4$c%W+W7z1baxTe}2@c-Th-mHd~j#!yQR#?kP 
z6dLe;KQ_|l%|hu%c4f6F358_dh-3e7N0)01yD`y7YW^yNEMTL!kFDN9v0@BA5b?Pq z5Ks)tJRY74xVUQYx*_&7CL=W{(i1Wv*#ZsV0sMzo^Z&B&toEO3@EFi{o8AANE0SAY z*<7~p2yRe5#aB^QrASo zqIo~L3%@rU?0Q~8coO;edCItoaX;naVM^X&dHDK+(C=A$8-&(+am^U$itl6{uzC?AyI zziBn!+wIhF%AJs-BRj~4T$IjOB7vsItEE@CLf)KlLRYv@NI1+}gEYB3Wj>5rdcdgOLJRX@r`C8L+aG(uEnbDr$sRtksR{DfpeCL0ai%b1#)rsf`AcRUOAguR9S zbJB7OX>DyC9-APNiX$GOh!cF>xzYFK(?MzRB>o5Bokc}O$OPQT+9`Y|O`AFj)BUmb zHZV<2>)qmb`K0jHTWGzA$_bY2QVom-l-eD=reohna>kG=z@FAS+Do93rb-!7EV5oZWLVrKIr@ zGJF4XGscC@fS||rTA*PL8neF#C6;=U#lQ)?wr2H$dOkW+`C-ckUbh9X+((?v52wzBrQdBy89v&{ zu@LgNFN?vV?2R=~#=rz0S2cd&O3o`qd`0(ZVu*F*1huK{#>U2Z9_tPnzca0drrK-$ z!7ZH9?Zwt=K3!Nq=`rI_QBhs)O+bI%*f4Qu`=^=nX(xT_=HKC*Y2#Ahd`>T%+)1Z@ z7gMAJ`R{ZNxX&3Gmz;cZy(<&uR4euq`I3WMcQ>1HYb<1LKlPA__^~4N|7f@S-jc?V$7GYfMF6LH?X@8iF@heN|>8;>gOHrwvGQM;-zT3)(P2uYN~_Mu#ql1*K}$y<18nF1)!9%cuReA1?SLLpfBLEuh_C zhX*1nDGd*5)xa0V;gFu==U{R|qNm5x01l5MEx@aL-3j^%Ui(F~%d@m4 zJw4uUUfh%)BUS7As_BBk!Qhi0Vj5Dk2O1q}}y44Sq%JZruje3+R-#~!Z7Kjzpie&Y%fqk`!5uu1Z4 z^&FREa=O}Px7wNxJQVOqXzA%~8g^K@cKn7Vkq{A^&W9NAc%4W?Lqh?!o?2VWzUp_q zvb9zr6aYSt$7zLzc7JNRJDNQ>pmTq^-FCeS{Vgvq@cy(T_vIA`4F}N5z6>tg8xUDX z04DVFsCGDw*O}b!?$Q{1@$Vd=%!NA3(^TG+q$H<7s#Ik?Jp~UB?iYq0u;rJxtvDCE zKB}U{3KJ>9zY0TPHU?dhXxxeE`1-z(>iYU5GAc=Z3?oaJ0*u_M2$u=#2CWpcK6x4{ zC?xR<@p|-!rOmNwR*Gb_(vbccFAl_b!JadDsyz#A{#dWfYl$G*f?ui^c)d6Ly<5MH zAIkhZAUJ%wqx;02yOs(8Mm&rq3`QfWVk) z>(`oLeKp|-QGIBTG^r9R4XsuE;@u(Tw>SRehf-8xQSI)@B!2g!gL{0v+O%!~<-K|k zTAk&b$H5`NnHM`P?Oxx`2mz}i^av}f8?-);%Jokvm5+vTWhG|`Dj68v;fZ@T8-mkO;S~oB)ku5#8zk~cXwVw-t5t>v9dDi4vgJTf4krks|b3_f~XZUdV3==Z>MsfdLM2T&F4OS z`XqR_S8%yMRUb1Kf`ExD=;c(*(#fb(9|7!&GzF?53C$*>zt(&>;ravcgO-!#@`cms zDnk(vxd-Tqv${K;9~~`5vldEu`G>b%&Mv1uZ2t2OoVRtk+b`y@nWmks)E@>;kYIX$ zeL5Bw@L&pP7pLv_4(CrCV}glG4lUocCC57hp)U*y7B6>cSyWqa zZfH2xbwPc7y|&o=Mt&1WK^*l>hI110vhv(}Ne*u@PTRlgIf`${CnQ8pwEJA)b?NW< zi1cXuLtJhC_*ZsqUEO9PUuQ__TIZa>BR9n*fYZMtOa!f;B!ZNI-k>VHwN=4;%RK)V zMxJt-GOGeBE;%kTCSs_`b!fK)dCkZk02bHnp2BOC$C>!rY1-bTNNxmZaXx1G3T_LV zG}f5Jr6$izq6rHw&5}d|lWBFsdCsXT>lrJpEETYu^@1M=bNUw8=3}b69gQ!X=Kl!j zW_mS9%282UZUua`6YS>`D?-)mE_6%Uf!67RZSXBEQzaH@>Re$9_2p4iw%q6IyI;H> zS3DP*tSJELjh0^NBW(MT-l))_9h8j?!F)~Iag2Rv!SL1bQexvh_;*K3Qsw3>E&#cg zS+)IkC-cREUWk-~%teiB{sKzHi6AmMt%u^&0t{vABH%nR_b63C%%&~%HZ zqnY-L6Sso6H3;Dq&W$^)Y^m%SSeOIVR5vuL;Ma#w>)oMxUhAPRX$?xx@88UznNwHL z8^No2Q-xe%p-&Sxz>f0(<@WZ0Ldc~gzuf@Ft=*jI^r?<;)LdNFFo-sF4hVgRD)*3NV-qw5IQ9H+cPOYn$=xQ2%C z!(Z_fX8nwc!e4`f#*>Azc8m3q0Jg3HT#qf+QIY3|t>dgL!k5ZLU84vDw%r_`SehKK zpqcWI&@Uuxhvt2V!y^&Qx)b#<`^=ByDl!;co#lWI_Yfw3{4*YIR{y;`COkz}g~+*% z@g{~fN&z*+%2L~dJ{GVn>c=|N&mu9`mR>czwO(s#WR=TRV=D@m3{Y#w1Fi~ zZ4<8x4*E)r-KG4M%a(`TO@@sk{4MAyz{5OFbHTEz)KCf}*omhcYt?2KuWAR`d8+KG z3_8orE54ao7t=H)`D|JjMoJb;*n!qC<{m5YshLt%ruKC$a&0sfg9H@;mkQj!_mRzC zrNl`;S4dW0qSX?=pX`w-gVv6Bro)p-o450DXnOJnTrjOhFj}t)NnGTTszU6pFh$eC z%#5;;%Sv{w-7hh+ufcxFYdiTPR2qjRio3gemdB#?pfdqa!`hQz7l<05uMZMd6(wf+ zOQ&Dx8Ze}ha{ce}b7j&v$((p%vPoYCYLns3i=)f%N#$xqF=~!5HK^E<* zWlUkz4zkWz_S%doDH{sKG5CeV(kcqRo^Lxz?X%Xx7D#}pb_p+r&=VK6XTort}EEb_6S!(wa;8}KpPM~yNai8Oo4{GTF zMh&M~sfW<{c)gaJ^^Os&q5b}(hf{IY|K1+htfZMXL^_VR#HPab9%S|uJD1D3jwded zggap6f4h-bz9uL4fmIKJz}r%!TS-c&D22*kVXl3#Rnr&ZGzC#qAWUhmjOi-r-IXV} zeX7rzZpc(DG<#<~wZatB(>?tYOH2tV4HER7puCBBDyTrw&5cge7SXlL~So73)Gl#C`43aUMB`Bs}z zLWe%d7}Oj}Sx&n26^j5uStn#>nz_??@|(uB7ydk(9S@QBT_SQz&=lm4I+Yy;ErwP~ zWFA4yHDX^_tip5>ye&4_NGHi`DU$6pC(Kzm{(GdrN{V6ngnSBJ#HgZtdL{LiAQP$h zOZd_U*VU1pq__0&wQTr_KJD!4M~~ z%)u{!+odr*JDZT1naNl_=PtA$G{dR*64?X+9noTYC^^gjZvR&;2&#TsjSl*aPs~A1 zg^A|rpe@JMQk9I?EhTsoEsmjX+r9|j0|$#II8O8-OD7H8&U1&=yiP=cl|q7{Ln5I) 
zzjKO2|5#B1PeFN%I)5SKiaY5#`~9*fOD|*~e_|dveC~#hx|z$a`=da_v;a82;rX< zkcGM%*DCXXd`0r{{%d09aK^}z$`OW=nHa1DGJ<{zDskgkHfwiKx!cND-L#89{;dOpJ_2muyQ@b(T|o=PySJ@T%`~j#M3=P8jzyOf_Pa0q2IgAE87wFDdQ!#@PVL z7%1O+e#KL~(yB3$taEp3^SOpKL0G8NZ^99c&rR{5SG0Su>No!<|;k(Y+Q~ziBvJS;H)meZ)hpnu#cj ztUrjGui=T)nRoQ;$QF)=)M5BK_AYF^}X z7`vQM*?`dn!zMsWh>#x-L1nA8t99OlB^*VLin?R-m>0EqjZR;NUs7V*x$l-I%MQz) z0mIMFj}5U#`*#S4^9SnhWNXM6=%e!c<;UNdbw+!#%y1C__9ils+8pZ-+7x0*_JaAh zZwDjt^T~!`6U@m6VH42bPm-I>A1UMrM&_>yo!!>XtEoh;oRBh~gptsQ1SZq%umowXA>-rblAZoY&<(R;Uq-)69~1gf?I-R(`dL zdR^1>PgAZ!i19|XNog@1+x^AO``B;w7<+ zJ)5{K6V3T<@3m8AcKD}aQ}B_xYknV|7>Ui#&qvH9uHuP(Ug4r%@_?bk`x8SY8Ch7J z%svUh>+%wNm;1@KrGA=xvHBH0cf#rK9@UG;W+_b0a|DU(ky#wbD27~VT`1j>j+q&6 zNl6Kqrc&~YF4FcWhOeY$gGfB_Q9SXjX@;W+8ClVEB*a0arc&?2tz;}Rkt`UlhJq*^ zYgD}-oJA>HGkMlb-+pI<3Jr_3e&$WLpDEqZ?d$PzMvWxun=WjZ6_&VawHX05A~k>5 zyRPZ2dC-_7&x6*A#D{0NA~s5wfkc|%(WV5N;mf4NnjIJ&$Vg!gMg*bvF~0}BkA)LG zDHNtW$1z_sS+P0y28X3s9u7dFj^Bn{CmbA9GF4%s1b~B#;;NE8)KrT3a7vBc0y~J2 zzth+hvUuY`7$pkt3G7+_^P6LA>onlm$1^l*8Ys~ouj(1->E%{ef!@o1Y{7GEag_Q! zI8G|9<_PN8I_^)4LJjbM5xlDF8qT;^PDoWh%W26eszB3j=5JW`lj%u>pbJ^z~ z8*^pSxfD{8`zvEL<_h*Lu1c3&ku2#&D>n^Gy}07{5z4M^Qs%xGJ=4nhdeekpo> zawExjHVItJc)siEC)<^p77whSM z3jaMHX4ze^%vBWj=z4=gnF(XRXep#3f8|GnSrt0GZ33(e?$XCiU0WLu>~WJGGb}Rh zcm?MTD7h3;&POx2%C#q%;k%zqs0v2b7E1pJ8OY#ualRDSK@|TJ%cM#ADoQ@!P6} zT7vEe+^~1HXo<+9Qu9t5%H4x%VruNPTua}C+0O7-0^Z;g6dYCXJy^mCWWv9m*LV{^ zJ9Iv&e~hZ+3~q+i>+c_E1T{4N@1Qq)aBy&g-#xqE3<%#wx08}uDc#??r~u>?eUPD( z59#`oW8gOY#aN6s9~eZ0jf&e;Z>`yHuYaGd` z^g$|V=hXpUJ*YBM%OhP;w;_KBk@;f<|9b~AbcH8}WDWg09fa_>v&{jTIV9?gh{8(8 zOpyTpIKB4Wk`e$8d;#eE z#`S%=;3syJaOemwC)wX#MJZKE>FjS>Rz`D#d_cp)t^C=eFr@|nN30IZyxb1UzfzcV zwc368KwV_o9fGh73iTHNf>_A=FTnOhe*WuG`ylkD7k>;$nBudu6#>pPCU}7inr5UT zBDvtbf1d+RCnr^HY;3@BPy z)k{!m*yws?Vz#T&al8`5`yuD?{0A}XJ+RJUvIn{FS({PX=3H1GSAKnp&Y#T z`@{YgQ!-Yc?Z<+;O@T0CRtsyMwkoZ*QQwJTSQaggJaXCml&Jl$)uJlLkNjUHSjqDD z8P~>d8GK58-XVy2E>#;t&&A>{t&3jWRBB>baj?mE%OKT{;74SGQ8(DmNw5awr zL9Ag+x}hi5`eU^M-c+p#=I4}1<1`tWlCtPB`kJ@3$v2)FQhlV;%40cU{PiW+@@Ok~ z^LE^2ZcIO)Kl+HRqs7GhgS$V64=?S7U1b7|Gt7X;L&GJZ{!~N%!TsXg(VMeP(3szp z29p&0O-z zjAiq=TAXu@yJx3!Sh@@|HIm9=2zg)103n5y6)6@X*HT?I2SC}gvf?=vT{~PGb(24um1h@SZLJhb8z3Q|DYClvjC_pbz_;whd@oW{0jg4D7p6j}b*mfRdaDbe#mg ztrbwn$;*G*jNxf|I3FGy99)kTII+6AS#9(AbW}5-qenvrFXn3C2^;eyT6eGWkOb5a zUF*EQ39!)=Ya#>|vHEGR;;>RvgO8l3tx%tF&?m}i;v$I{!`mZ zw7rC~_o+L6fK%~xAQadqMuXNgfDn6@)O@kdcvwdb_%p5c!F;ja8Z80g-T$}%pw7Ys za1bflxBUDdKGz-2CD_*{90Gt2l;zZoP*S#Nw|N%)!)y=(n@OjaZ#PR^MFkyLb%j<< z&jQpkDox5 z2x^L*%9fLjvhqq30OemEFQozwF}LID@0Qc{VE}n&5FLVJeKCNjzc4pP0M$NFqb>k< zVrjd?Xk77FdPNfgj+t`Q=H(x&AbELtea;uVW0oLz>4AnUG9m9*fM=-y^!x?$*vf#f#9lRZ+VtQot@d5E9?9py;prN6i0bCqa6T3{sh_%wH71z0KD=AN$c~{ zvBmYte{m31_~x6oY&j|XcURscGjs0eBXVsfJ*9X`1lzgVVE%MO& zl$4_I2n(xY!jRD{zTMxJu(w{TyBD{uYfN#rHEUfZb8}9VgeS_ zA^0eb-(5p-$|%9o2-=g=Z>GPI72t8%@YZN$X)#JMVpCy%!HgS7s6rKFXP^9eJgZw$ zUS+w~ZgPcnTvTGkqT6sPqkyDA{}Y8TzFw|WLY2PAjUN8eOAwBDCvbOU&Gzu;+@{cm zza#Ufbgo?A@sW;7G%2W?LscF=VfOQxNJ&s(?pF_6ymdE)U`8|)c}ZQ}9jZ{tq> zbh#&(COZt@%(%-c=qheEJFBtTP@1R^9i;z*V-fO0tfN8cJKWd0H_Hg$i#f0s@; zp0Gk5aGw}#5tZY4vvsMCZ**4PPFHu?>GH0Y}&(@p%EJmuI zJGitd)AK64`NCY8D8qHyjKcL6rD2K#nx<;N=qiNFciWZ5jh0oA*9tqICnEF^J`_K; zm13~_cWmNbv9eV{k*&9=sFtq2WRnU+7Xhs7QDrQ{#|tL5{N~~u*5At<--7;+72ucp;~n3no8 zv#1Z|mC95@%BO0GLRC>J#m|FS*)U2pYAL0r?z$|dG_I4h=N+yM1&Ni^jJ&${1i6ps zO?xbhjt$iIE^xD0clPE~mv1{ysfJ5<9q1Vt)W(Pg@*t3$R>3v&xVjBzlNv7<{1UlX zG;Z*`$J+fQdpFs)b;0x9i}ThHg;$<3s;Q&x?PbHOSD-IILEb20k0rjWy@gV*s}1?y zNR>ETm@rAWuO&&kN71&9DMtBPOx)JyP<+dRCSSDpb}uzO*4(zUX+~xc?~%P%9v89+@RL zbRZ!^4#mgg^VNz=Yo-rD6N}ia`lyH(D)ISpXh>&6g+b*TUos42A!}IOdlW*KYhS^m 
z4qk#@ipKPVJ?{sIOrGB_ylr|{-suh`$WD||iCOeER*IY{zc(yyYKUSEDIp=;if`TZ zd45>@?Y!%jFgj>re#dnz@4S{sc&t7do0N@~FFG< z9haCKAGW#pJ%JhN#V!N(++UH2e#+iBp7{Y6)CGYrqr*fZtk2f8MbU5>Szn*UA@<^Y z;|Ev8XZN#e?9{wEfI>-7<%lf}n{*oYRp%iaf;9PH3LdY+)ZKd>%Cz-wY1kO=js5(h z^^@BiM0twZ0*%Fa`)7qz33V?~s@f%Stomkjqs-1; zSAsf2a?SmpGh6Qccx7x3-P!>OyNW?;2-x~69z#fMtUL9rAfAZ*ggX#Ld4R&*a_gAE zuac%?E9Nm8Semb-HW(!#fC>`?moT3+bNRjHIvfevJW2VIIxo!o>V$y|4W89~h*E4F zKHh3cvYny4ETJH%Fky&OkwnPgMNP>u_K-e>lOtzfApGOs+awQ@8Yl38#4efJxJhBO z8b$=sG&9&u+0iR%@uN2=GYn~I)$6|oB(>5Knd+*UIYh!adNeu_jF8|ZV- z-oh7Cr3j5QVv9GUcKB(xvR3|Fg0in1m=}!^wMvfRK#Cow`f*Owz<@aT{5;Y~6)=ig zn4~)y7|K9@R!6(o2{VI(p!VKU)`~xHtF)gZ`bL(rBPJcEUkb+13(Y8-GB8`&wDexW=FP{4WJ=&f$-(r&&rQz5JH^Yik%3B9Y z_tu#Ru9$9)S*SR_i_EK#?Qj(%rEiv(y~pM6sF`Y12;}sIyZCL@`lGb z{QCO>SH)K*WkIbrrd!HvCLy^@?=o?d1Y%_vrC9%bn4y{uBN^sqO^!(f%2>={LKXTN zFJ%TN{i~xmjD|`6&0-8Ow?HRTeR_l>b|6akfj4_g?qfvUen~h~c2H7*YD$-gn}I*| z1rt}Q{~PJ20wv2dqP}`@Gz4Rl^$8>T@gYU3EEhr?bMNtXQZL_L)=2&gDP0w!(1SZE zo5MpVcibvNV}97h7!HdYWk?Lk$midrXI@!N@A@1MrenTum>|H10>62x_y7|E#V{9| z&OQ{*&8g2LCR69?)Eh4zCyic+OYtc2G6)9t0WfqnoRO;L#Cen6XOwlzM5AsLVkmxv zhs~DwvxwG!>KlfZKj2(}3v~L+y=&%k9wqb)dHImot{!PNCsF2fzNmyD9WiIPxxW~$ zq=bzVklafn{-;jmpjRaCJHo$>1&^8jVdD&Yy%d;sj25%RDYeNdv7r@%F1F74?=3pM z2^o3ATPs0G;J#QhOAID;90dzUwp+*hlDt>Py$IfKlU!7q=2Il~V-!ZPfg9b`31g!9 zYocU&PII`MU}p=9r7h-K?0$=h?}1zp-YhFP@~IR>=wd^4af;;i!@ufCmGQ%Wr4bnu zEVa1-!5F5ciMJ!iuPbpl;s{U>yOcd^3dM{oP(Tga{I)D{Z4`QVV|Q4LYprdzg+k53 zlbKd-70o9>Sb`)8Kfu`SoY`Lz%3`;Ek>({tO{`{Wm7T$Aw8ZT^Ookta8PjJSEn#gJ zJ&ubqHILgLB`I@(i%iU(l@-ky_I8NcNVG67_COgq*F*Ng3VGWof4oVefDOMV@8szg z*dhEx%$<5+jay95{JP46^=h!xH3~6V;qftC92(?G#7pAr4|?|wMO3{Edt!PX9rGiU zZp?r)p&%sET-!_NeSR#eG*!@hIJjea!ff8a*~o4H=%_R#-|`>x0Amy$XsD^+HTXVW z8NA+vi()0<>`J4DS=7+{dQJwQcnD^N=Bfmqk@v5)a5<$I55!cE?Tx$@6vRz&@sp3# z_2&8-h!_X@8<^~dsj(ZYnULaK28RbJX+Cz#J#K-vXq2#y|S|s(^iCU7p=?6_VT$TOi@%?%$YdIB*r#) zxzDVys^)^%ds*$_>|1cLaT~rgycdWnAu>_#$EA*wEv~Le`A#*H6xw;0j^sDck>WCc!lmbQ@6Npu?$NWus_!|y{IN=F*F0Bx9BU)d%bv0Pw zqBA<2IMqQ;&1%nAT@jUjuQe)ObnDi0u+Pf1&kX6=~nk+Q_Ps94l7`hS;# zUl(i{$Ozqi6FW4JNo53Dk+=*J3gu#_2m>=+H>P{fJLnEX6oP5Q0Qn<9Z3g5)a701VaJ`xc=s&Dj?0k8VvMq z^5S|a*XOtpM@roM4Gti#%9VU62Zgk9QQ%+M;J>S?in*xz?#!%>X7+%FIh9LGR(07G zXlxmc#=V46YR07<2-_O-t`}E^l}Xgz58B`Zc7uJettdN;`kvXWN+^=VhW^zQIMk-w z_s#$G6Da79wccFm9m%z7LXY|4vVnUBFI7Rzp%?R=OAgP<{*O(}B(4LGFydd3awUs_ay<32YbtB-k3e6-X;<5EjI$J<{@ODm5i*IrB>tu0vioY_xY zA2&x2ePg1?Jt-xOjS!l~g2?35?DJkTWiDWapc1ZF+!MD3)cGac7Z;`9P!L!}OJHnM zZaC`^b^g$|8|Zf?BwXrpCe#DPq@K;;rh4d|?{8*_wGD}dkM%fn?n}M`7?IQ|hy}{@ zDlY2l-zsp2qQnHFW5XxTq}&)oV!5bubYrX-q8kcum5{f!tmdraXPSgGtGwfmb-CTU zao)XHgyFa&Fz8-e$zF{n(Vms_B`=c{mu!fsWS8AcCukAWZs!xg*if!l96~&aFG%ev_%-G z-(StxL;-Mh!+EyqHe#k+)8p3i<#3#Tz1P_Df#lTf#>bso8wZ)7kJEv03k65=TC zyON>I_(hqOWncM2PQlQTNWcT1!QrfHsF8NDt?p1*;=j#N?RX2HS+V#=_xdBNV}9He z1Q+HtJ^_+BAswsT)q{*VLxL?*8}(N65-qZ2d@_2k?yW=q5zIF`3cds@X#9ad#Xi-q z1`!4$kt&92Ve4-pvzLeaLs1sfS4Gg2C>KY%(w*N>f!r!<_Gj zhIu5xhyP4Ioa~h`fyYN)!E^7l#C|DWKX2^dAZD+w88=c^yE~B?PTbCK^@ve6CWY@r z;_UZ&JqR+e-_a%0rh*KiTMwJ7*?NAxs~i>{7##KoiSTjSY0lkQA7`o7Nk0=n(qcc7 zr{|K0*jq~fdz8AAq=J-v(gMakIXX(Xr9Z9w`CoOw4aemDjx~D{R4(y1@`hfTwA^5L zmflEAvf-9XgNOJ0crUQ;15O#_bFpge0jN;1Cr?3eGMB;N_s(}|E1+Lm=I~S$R23{qK2dL?F$ZQ1(IW$KhV+w;j)6Ryv*-EU{x6# zr}NRaz_wUVs|31WEWnz+n25hP!|t~^s6qbH!J`6)b88%GivOHWWV-4Ajrhx5#D?Rk z^i2TEY_+K)v;hSadW*Q{j-pz;;}K;JH|(4eR-q>mPfgBvgR@$**= z=ebQ+k++PE2$N^ET|*f9+?B09zZ53@FaejW-upw?1Q>!7sYc`G$?;O&fkhFGo4;Dm z!Yo$SUlBnh=aIl7vHRkBio{j1p!sFyVXT}jMMH1$mgPOc~H3{YCQ-x1Sce0;cF;YDrJ8@*dWM^-@A4PNnZ z-mCu>cUgg4&#kPJm(=ZS?Dy$3JxE(aW3X>Gd*c6pt*K%qqldlq>A5-28R<*qs7J29 
z#Q_qZT(sY`3~!_j!C^PSy$O?}3&Yw1C80Qh)pr#+^dCM0NfXV@{M2y-ebbSqEiH8a z?;fJ;P1a&IuHCpJiYzT8lS~x&=@fdo$qIG&7X>*@cNIf7O{AoMw zrLHaa%`#=)*&#$-*+kH$_?tKn7~0;L`Mia!_td|wCYofQRF*4vt4QhKLinT=D+_p9 z_YF*fkAJgQR_0h?x)D`k96udP8B|tdOfj9#XiTO+Xej zQ5r*qy*_!079wd|Tv0^-zXwD}apC1FL;7boaW2>5CX0p5`dl3}1e^qg8{pJ@HDPm; zL+R$xHrB>|QF2Tnt?J2fUoU_Anctdqywq5Gy*c;ywsmEN@P7;8cj!?G{G2-!9UT>AJrC zpGEogI^&VqojemizG2Yx?|oi=77(7lple>w(+f(A$if~nN`Zun^u4Ez7x77ipmAXBkQ{3kwTRo)7R7(qK3w20RRy zE5peNs$rcWKxbd5HAl+iwm&%8aZgi?dLa^7Pf}7Vq;&#zx~#lB*s#}uqW^t5JUJyL zAw3;zJ*2%o2N30Fs|?4Q-4EvhciNMuYDz|wR)Qu?z>^t}&4d9Nrmwbwn?<$NK(?PE zWRG84TU#{0YH(=C1hj^JXK(>Vc(gGtP_oB^fhEat!gnQ^y544BVDyU=$aPF86=(s` zBPA$rFhK^qy}gBkdHu@8)s>Ex)(A9T<{K0RLH3IiHW^Hy0z)dlgAdzLl45P(zK9QB z^nrkqp)N)lH!wBw9YZVvI^%UIXo$Wv#eot#PUMN#5wN6r8Y}D#?-%EIFMe~Vy<#Q> zJD)%B*Gmu_{~!ECEVJFQfl8?WkhtN9y*>l>q=)APzr@eV!-mBYEQhm!l?h_9*bIFB zaKOa*)8xNXTAVY)({ZcF&d$z?yxHX6fF?%Bu+#vioNdNS6R-E6iX5HnK)uj#U4V83 z42hE~J#mNyzw~ancD~Jr=Xxzb1aR2}PI5cE&Ne&-9BuXpGr~U)d`HPc^Xp@3T<*SZ zzZR;ktE0lj61$WOGya;G*aL)B&<=p{?qL5#HR?sh0%#=084#FYu?T}fPR0IMqG6zO z8NS`kU0WI9qsKr)bjI?($XsK!93@O|rwrM{1h)!qa}x-GG#Y)M#`DBKo%P}1iTJlx z)M1NVB7lw4`?Q_x3;s&<;R&QZ6QrejaaP1j?Xu@IUVR zmS7r4Snk6XjcTd}{KRfLWH5bp$KCqr8)d1K=ZS5 zm?hx(qyO!F$0TlS{e9EZpBKX_%K|a`63(D1)esN{^j#djbD;|Uh3Yb@mLFvS)mzNK zCXmM<_=4>{I)M+r_t;z}%~ z_1~)THHk1~u0Bb-*m_x?x}$&Q^F!R1S6+9wbBmAZJpP-A9)357o@?jF9>tu%Ldw|80m#7Q##ZoeOAee6L&9jxSwBfTC=&7?JFb z2yAe7D3W%y(R*M8ihhuf!q)xjq-=8Vf7dfUIP0hKO%>NKHs+Uw9XhmU*3cMA%=S*D z2R!`{yqF{?z4pXDk$jsiZuFauDb_Y)^k-FJ^kQ?^^x?JUck`#)xdb+UGMa2pE|LoH z2O=-3+WTyWKfeAudSCwYbfS3a(XW2ici*-6wX&X0&f?QX!aK&MzqV$Ig4y7EdVlad z)=NTp{!N-V1!LEM+xfd)_dNU=g~JoR|AL(J^v0p}(Vuac9RJHjuE?F_!41jsEvate zd-nj3ZUh?EfG1yBQV`@)fhz%iju8mM*&d@+{{PCic6K}#0hX1|Gx+=WR*J5}{)t7s z*x>xg|F{69V=y5X7K-Awex#%dN;vwgyS>==p*`q3xdBW8H;3{Prp^#W{DFginYTlY z0o~8Uebc{pM%&I|qoU*^B?^379WYb7V}10IsWpSb{El%)nb zrmC&=f!ysgO9SdO>`r=q#BN}nBNP{{BW(%YvfSR`)L!;0%N@I-(?{WPUF`zKGDISI^+v zACDb})jPsHYYe<0k z7nuFnf5-KCE@lEczGjE|04Ws(jX>fsiy$0S$Jzi_YBp*o4M79p)iRFXx?3&#l!{!9 z(^CZsSbEb*^C9KCW;C&{xC?-eb{M-)2;2_{=%inZk%f?=V_ns&bPFd7pCu(iE`BAB zT@-gP<&`z3BwFUq+vGwR3*qJxr})nnYmYVWi6bYDL|pE00cs9H6^ICvJ3i7=CzW=l zhCE>E2^d}N$pg*+-pIh2j~42*+q~VwV>`AI#vfi_T9~Ms!w_a@qCN)}~JaBu#ugJb; z2>&!1jl_DF+Awc<6`Ay z(%lV$G$`F5($Z4W-QBIy-QC@A)_&jbn>jJ(&zU*QY-c3)exCKLb?0?mce(3~Fg<-S z#qt%a$-TMdF4o>j)*k{Z;e~;43vAVX13m`4F!HgJHHjz+c50~)&Wy}V8tvVB0%7JH zJ*xdEKPwe~c*)57yfn50{(e~gK6!PS$-OUP&)oVwRAUI?^AQOI{-I((1yxg};{~99@ zwclte&5IzgeLhK53(kK^&xyX(?bJ=Jc&6`XKmwa(m(t>$IV0rr;(hw_XWq|XgyA}a z$~^2|IJJI4r4HOw)zqEHAKc# zVSt`?IHN2-5wyVwz+vF_S_?%>t=*Xi9W^4+2<@xR^t0ci-fdkD*?gWYi`JV%Nec_l zL8jnfI(qoAqWtUEXZ@3&KwX1!(R)kpW8|>!ytN!`)2sdI`q+w78SSZeRsjYCAYCieldh`!+@5MV>s7tUF8b#9ez8q%dBfd+PB)6j=5O z1LR7D#IAH7FMQ$q%ZY-Aw5NsU9~?#ux=}&BiX@yL@Q8$%}8A9WIK~_Qs z(+M)Sm(>X;KvmEL>@!s>_|lerS@1(<-fn@F`}mZq`jT(&+Qv{o-bwLZ%ZPeu%Qv%hk2Jf5W3K^* zU*$!e%)C3uKTB~0|E%|?vNx8p)73@URc*3H^6Uo11!qO-9&&F zR5jJDS_x=vm64EumyKu4Yslr|MaoyD{t{x5DN%?iD#krW>lUuia+yd5cB^hFLX^cT z2}F79C|Xt(bE1%`zyem(*nwY`C}V^DPG;;Bd1fX8`__NeL(}prD!xI3Za~`nG;jm( zit~rp@~Ej}RO&M6$!rW=<$#rK_?bIXJ(>nVQ1R_s7{F?Ko>5nq2$Vnl!965EIY)q` zdjy5l?u_N-9ey1Qj@S;4498GNWXVL<{Cju6FyEQT3Ww4+oYSzGp-fkM6WckuW$i7w zTW9=Q*?~XnRP#d2Yw;_ zDjdwur+YcB6sCrzsqwL#r3P2bDk1b#MQEWe4g4v-_r37v6FuP;zx0K(R)Nc|^p7{~ zmZ6t%HT>D7<|!%&5||R<1nq)X^PK>p3D*Wlst}q(cWxa-E_uIDiR^i|tyH;0mM_3B z=PPGl_i(qgw|L$8b}bC%7N`PX@bbLVC{U+{nxZNqdImrc9AJz=n&NI7xXPPZqTezmPOt!&&yVk)+=XQ8sM6NQVj^DAZ(&^ z;-NU%2{Z^^{aNqA0PP5l7%0ZB3kd`8<;_H?cfi!12?&Yzr_24I0y7{3N$0ehteUTPcQy_hanHcmB&GY^=yxUrVsKoG4 
z)D=LQ+8myNtpRh~M;%oL;5?wrL`O%L0RZq5$$=m(S5{M#0CbY{r~56KI>0f8)B$Qc zbawzx4y7c61wOtg^+^TAKH%Zj@V3frh!d)xX%CfTsUBvI%Ui z$NFkTAHUelz0qs=@lrM(1M&HEodfl2^^)}v+BC>Az!gV#q4)Oxxr_ncV_GslW==1H zn7?6yYs~(}eB!RDa#jA8<0-b^7wRSn|IrdM(w z9HGbVr&epwg41t8?jweJ2;lTP#e`C@(YaNIq=vkuFaJQ2EsgOfD+J? zE1!}NkW#><$fuU>bmSFGAI?@=Y01BM9MpU4I9}<50jd|Eqq)Wd=02HFd5? zE|iYY>)c6XJNaUPe&fARe#`$Ho>Mpn|JkjeXRB*2 zxSB3E3esLDL6Dr23PE=J0#kBD=(PP6DPV01&+Y-k z8x#XyQZ_`{@CSL8=GNflaLoSJX~><8)>plip^WxbLW~&~=x?vFNtscti#`3z_9}vW z6o^_R@)%I90p-=cWy6m$e3UB|g(dzXJfL{Pcpo~QQ8wpAF z!`;=H@Y9(vIBF!mPq(E{mQJnmDO{{>r^Zlbu!)A}p4QT81oxsoFyau#!}qYzpa2vM zL0$7^X4D(L?f&KrkduJY8M)NBh@~d`Wf!)h{a?a>q*mDF`4Ec$JQ=E{>I_S7mj#;W({YhpT5 zwdiNh2scdb@=FUloL8^CUU^G9+Cw?GPSB?Y)myWksStZ52TGq%1H4z-nr6@$2o=S8 zGq?&aO~9_@#VNo0Z5;B@pck037?~I$^WPLgH|y9;_3Kz5*Q0;)W*r(mg0gZk zQ#=|MSP@{^+uz~qh=U*zs%`+SCjvQwrpMd8f^TrL2}~QgDYjG~45_x7Vgt2jGLL;$ zGSB07h3g4_p2t@T1fQp`E*w6JB?{9u4vs90N$b&f!8lI5^f$+scKi)>RDuQkqmpbu zqepFK2zap`AlUJ@WKA-XBq>>QBL3^wuWE3KDf|!TW9UipHbD5Yz*!*ZTYP*E5K^PV z=gbuVo5Og_4-Z=pKWaH_)Z|z=a5Vay7Lr4aEw9!ZCwPBGBW+d2>IH2YLRPc ztbZp)TF1J z08yUX&eX%5>>maVyWeZ=6;UN<##(AEcZ!pxPyU0aPWBu1&9qu(i8uOB&+a z{m$s-E2G+PZj4Q2-N^s5xOhELjLRF{rR>JXzG_FLKi1p$Z9-6Ug@JXNtnrKr9Cm8r4D8GL%s__n&kT9g7^ie_KJ*XBo|K!FHt)h@d z^9L0HMEEhyrbvIWrl&=IeCQ&YnV zd7{@TY#`*NbN(`>L>I}D!qt`Z{q47;9I0E-tG43tD!~-5~pT2k41_5cH2Ahy$P9PtD*p zdO<-ct(RLkWv#~KdCZBO$4zn!R|m_npA*WR9vEb5ePpVv(aQ&##RspjisvPN8j^E% zH_qtQr9QlCq+hH}_{bp#K)1Szdo6r1Bt^)32p1Ut+B`{H^cR$x(9CZsCqZ`rh*toa zJ>K$dI-yaBHhCherRc!060?Ib<1CogL~JY+GQa8hU%`UsHyb;!sXP$ zFm{%hT;+i*>s$>hN~&ffYDRU~UJo#o;>7T|JCyp*Sx{hGN5>9Z&mM9X3(w;~v~amY zF8CEAYhgs-!B`+y;T12>mjIBxvaqK%{XlW_6Z(sSpvTtV9%)Z>=cJg?qEC5Q#k`W@ z1J`n+{+s8K#3fJBP{-&Mu?(?AenE$%zi(vh4rl*y%5%Byiz)5FV&R5&z)j}>78c?d z2O#YMUA?%1gG{AhkYnNd0wdAXm^@|TeG;aJ5!G7{4XA7F$e}|VA)tvy7ZDM$FwaNH zXWDa)JvV>IgQiJrp2PZS3;xZT1p3bS*~R<+tRd9SAS2N7PM2*OnJN;r|M+twH=szn zL`*wXV&bbRZ`*{z;G(Qi7qg%>S-v(lv44M#9a>YAQ)7=Uv0-NXPNpwBJ_fUxf}nU~ zC<(j%wqw+3FJfx!H$glN0x64r8)QV_Ob!CnX}--q0yZ;tST~sMU<7;}RCYplj0v6sd|dDs#JWentMu&tK+Wx|_*PCST9vR_$*UqbUCMpZuUe zg4oG24T5Mu5rjw6Ugwbi;(jrq4oz7D4ay9=k$_-UH&B-7A0CF@3RE~PbuvvDWWWBR zl(uy%@Q{JQLAU#pUa(jHxiJyT&`BtD9=ri)KSvOAR{%aeG-8H8Qyd^Lu9ezaRD=Sg z3kZ1Z^({Tz-3^l+OiWDjyznU_X`?I*lXXuK(0c^)&*Y!`_tR(ZPT%%^K;5x zm@z~e2lXo0w)(BKK|-@1Fl``ouD{(agi#p_jf@-JS_9<(5vyT9wv3&-^j+-Ai=_cq6vFSazYMA{R?aN|565D+*n~HieE5k!vJ^({ zz=_}~xwRio_$&_MLKB>*>p$HPuOqz}we3L21GfILab!G3R$ML(Lgbh`tARQVZy%!j z-G^^}BhVs*0`CS6NY9|yfG~O`kuVDtdHN|J(=igODDmbDz>n4Po&gVjqmcl~~fNEVfwooz&TH z=S$fSWv5s-n@VFo&$edvF#lxm;e~e;qG8WhV$}-Cf3fHxrLZ!mMX$Z5KgNiLd6AwTkhZ1;!Owi5Y=hnGR zUZ4@P`)qsBr~`b@$& zdSimazXRv6)Npv@p{dfF*3P>rf?Ig8XD;2^?BSB?*RJUEW-OfPAuXy4Ao}q}5IRSxd zkT{Oog8^nMKmdZ(2p%eRS`VNKe8oDVpMsLc@|Jnf$oNCKHN$ANLulVCxUIa}M7dI4 z{;t%pqQk&dnw4n}`{!F~*8q}3VHlzWgH+T9gUQ2p>i7l;M;(Z9s}b*Gq^SwTrBoeo zm;y8JY`Dz+J$t$Hes)i>AIEp`eCA0HFjw;nGM%7tPUMY1DJV|TR^=YmT|MgkF>n9P ziXAJBM&(Z|rhQYdv3|?BH)*%*7o|qa9!V5J@dr$gRJ4roY)ij38_O0jJ=L&qX6t{8 zrq{TNCHGp?MeP->0>k!U=^KBZuRv0)Y~cDKc-jpTMThn7`4Zwbl1v#|)i60$SY(Qn(M95%x`cSF3D`|Cya)dB|C zFc_cJbYz8O3DdxPH^e`ncwQX_a?5hnk?Q}#V1D4Sx6u@YV`RzyClf0-l3;Zi5}z!7 zqN$djb3mEu3Fcfyjr(Hq(zeY4witke(7#kcVE9GaP?x7*JWYwnxPAGyvFt)|E;svb7I)J$TdDDa7DFm1c;zyqSByDqd+6gnD^iOtXp4o znqmAt-=jegMRX`Dy`Q5};Ju+~@i}9Jgs=%W7**@(;0yWm^rMhOFU$-B1Q8_w15wU$ zINjptdhX;HfAeEW%a5Q%+sI^iJP5~D(h74m>ont;jiQZ?Y8Ix^n+HIdn%bwAYcBcoa5r5x7ivm^2b^9 z>ifQk7JuzfNBuAtMnd-i@m!g@Z66)?UMt>V!grDC5jiASVCqMTZQbeId+Wst)YFoB|Sg?I@kM^7?0G3XB?Prv|Rq;#@Nh`BI4_WxjNi1VCG>$$(J z88`8qqT1`e>P^&CR8&~x=!7q+8jqQx$8(kbl0{ghMR)&$p?dyrD+Y&u5Qn*wsKhRC0G8a@^v*7DN*j#I5$^==Zquv_7(!WZ+;oe 
zr?7DG+kE9jq0da9wz)gWSLcm1sM@2u$xu{NeNdU*hG^?qB$l?`tVB+xaXNdKuVv&Uqr571!ZPL|5y%Uxw z#B9&6o=&3bEJjEY-Z(8JbGqJbBsiR}OKay$QRkG$xFLy$3ox>3P3>6LfaX$#$D$uR zY6s|zp%$wlXIQMwsYVHbBt+fNw^casy*f2k>t;UYG6<(WnGcj>iM;U5e*2cEaBQXh zUD$C)VX0`g!$RZW#h7y!iAPf5-p5UM(VPy-O2U=5Z-Zsz#O17uI?r@ky_**vQmAZs z-e`$%`O~=R*dPL}G5gDHW$}rrH9S9<$Wr%lTsH_;fWHVB54U)`3=2dd#iMV?gggn* z7o72N>gyAAmD>dtx<0sR8rH0C(`iW(ceDzfq3F8qynF+Ro}5OkJ~mM$c83H_SbAD= ztv#i=!7B>i6=j@l-O{6}M>P+9AaR3C=D(+Vfhgi=$sj7)u;@}rNmDu*7g)wa4SS)X zk@#zh`cJoWjPn+R1ywF$hVGER$6EWR!>nVSea z?(Ze*rKd>L zryKOmgt!-46wV<^&;>|qpRpz;CdOPSCiY&M4%R)W6^td;QCAN_#%HFYp+oH6*iax9 z9k>`{M-;qdQy-0fP9dGw8S1nZZN!%8q4G^DelRe)N%V1N46$#ZL+4fObk1+w+JuC) zvDQaiR9sxwd-A94zgBv#_YT$|;TsM$r+Y=oU3Z?E=eHl`dO&b3Fvn};Tx7HrtsT~D$W)=Jo?F}@Xjh>vUk z9`j|10L3stlxtpB`Agi>l@E_&{@L0hIw_B@VwwjUazk0(*ZIQN|19d!Whm0N&kfv% zRO3&FZ7^^C?qTlaHso7n!OVkm{f>Ll{K~4%+g(T!dRsSS90dDBpHqR=GCoK!>Sf8v zY&SW(`eV*=UXW)$SfE;$SA$o(wP7%uD<4cKIBPQaJ$nTX?xW|ZV5-njrpICX@z-bE z@5M*TxD{oG)QEDoFL*r8^M`xG!om#bUW+0zk+{cWmUQjtwiVSHtxt= zXe{@*vTaOb?zyF(@q9NvQLaAZkZ?0wTIryUXpJac1Jr;qbH?h=Wi+z&JW#3Ae8}3p zZZod)(lbi)MLMc166>94smA_l*BNh#M-c_^hlOEsJ7i7gKp?j;=1s1c#9Y>8H$R;B zCf_>Tu$6kgg1Lk~b{R=w2#|6B!edpp*Y!rd_9-cmebKwu`~VNyP71W%vNIqHJ(0M@ zk$}T+z2J4_d@E2ssaUYVe+ukR>&*P_)+D>xyr!6Xk8XLdPQgau@yXlUFlNKWnaaA# ze8gCxJCtOpD-wi1Gj*?@11kXvcB4im$gPQV6PD+VX9@(nd*#2>nTtSEBpsb37#p04 z?=3CcK1a=9FoW24FjXhl5C9LB8%kTgzGB0Ysot7L-4EQgY?^6*abnkeO_i&z-e^2C zuR`B6a$2=MV9nn#PwEu*CpgP#YgV3_j!xvMM}np<6TUy9RsA|3$f&ErgvG7R0!0v} z>%#{FP;ej>QVhRP4ZR6OB?%5Zb;R+!(iv3j9~EwFM)T;^E$`70Nxsnq2wZ~GE#d5z zI)ayyQCUY#Zi!8%5p(f&R`a+>#G>DF04L+Sx;phpz@QQW0ypWQ5to>OgI-=XbF+VH z?NVeB=&PA78JA>n~ms$`>GP)vfYrGx8tIASP_*2!(6e2ZV_P*Tgk6d zh7;>7KMbA)3S1H#a;LD~UQCfD=h-P?Y?APPEq|@nCd@Y*a1?H|3UmMIoDLw@=LF+Z zQzMLM6&v0t7T6So%gJ_zr}8^|HhGDs-Qdq^*95av#!^0LnUgKRSkE^XDaCkI_X68~ zVM=eU$}G0NLNVChY$I7PMD$J1`}e|b0s7#*X$|_6BzwU@)3Ep zY8%$Qc7AQI8Ww@ZOiEN(wYLoGQrQ>Sn2T=wYt#Md`1Dj{{DytOitaS%HpY+EZE5$q z%X-rJOldd8QJG9Fzs$uf=m!+i z{23@9$Q2bVHfN?kg-)J#z@vz4?oRa0RU<<%xE-iUo;Hb}clA)MWk)TqZ3TfaKDT92WGm>S@9 zuK`qPPG0Y-8^3Pg-Br~0*?5`OV|%%!3OI+g-faCN-9%J)6whr+xr*!KGn|@#w;EOT zJgmogTa|nYhOh~eBY+6Wy+ZLmSJr;$K+YPEwdBgq7_OIed-qPtdF(WY>+zY$dYOS# z%aPE&RujRi=EVuS7#$TD&xc`dE^>UUhwqv#zuH8ezoMad^AZl3Nv9+A>em-n4=f9?FOR8OhpLd1xuv#lo0!H%9&cHzQ zZXhwG55EySQ;`m`su49-Nh_Ip>+v(w{rQl91SU$Cc^-(!?phQq#Dz&a_=wCMF~zP9 z2EyXn|4r@eSBop=QBZ(EuTAY+b?xwbL63}(V3F}M1EpUR!KD0n4d1ZVj~4cR3mC9v zp6+YBPZLD;`Vc3(gfEF&RM<)R8lRV^!j4=;`+VhPHDlaDopyh8Mmt zztulbuH5crhJ-ryd#uxGxoJ9@)R4w#9s*kv5GCP94LbJ+wew%?O0_8~?$-O6Jfis6 z#*M$0{QS9b6dH+(Sge`N)7naCW=L(`t!(de9Ud#{c{o0SQSswPfT+FYTfW z>}a(X16X(n5?luYs_0^gPX6GaSS$Zg*LB=$om;Y*zcbUXO-qJ4pj&ea%PFFkHHaQB7Qtd&H&8o7VVIsL_8zEJifdEeT)bpwy?;U0tc6WuDU zyy#Qok%YY=)ho|)Otp*cFJ<3z_YQ`w*?ABy4pDQ`^#+#+$-NQ1OiTuslB^~vY*)o; zw0#7Dp;NM`=Bc&B9g@iIJUjoS3Gq<)Vb1-~cd}OY^lV3GtSN`h^Ebttz5&t0NYcKg zN`!X^XcSHtmWib#&!(C_lyh<86Ow!R`|p)>PUF7LcwgvXd2n8MwfD0=|Jt+O zu;`DQaS^wy-#S=~DD4e@Ya#*fn=2RyQ37S|L-#?=KhN^Zc%G#4#@fM1kZO4Ph}H*u zMp5&p$7`3#HENHOA0xSrOiY`>d*nw zkj`rnzr`gQ_GOzGQzM!UH)rK!Zhk#W=5n6c*f1E*c#9}4({Vw}MR5^AYkxYSz7TIW z=b$bH9Af(O&}9RW>cNxI2#wn`^h;ify>s~t?15qijs;G_rum`_U^qKev7P* z*=t4Fl93u}mBHr20p7Q@X}o^F*Nxxn)cGFy+-XgqzxH{>D9u6H{GRM#RBBjK3;`WAFbEh`^he#o) zl>I9Hnh_ZIdh>18EX&F2x?eI&&|V5Z%~x#b65~&O8qs|RcaDooC+tP^w%I+Cix$C+ z|8ocT8x*m0PpsQtztqSnY$ovWsCj;4ubB6}=DjA-eUPyJx-4_%Z3monA{(43_fh6fc=3N+7w1*wYjvuXhP{8hfY%nO zGLtqYJjMK~l2&ZHFAq1DzY4L>+2lH6?xJDj#Y1-xQFGN7ktNs-I{v=T>Yvjc3e(>&bI1iP~d z!)%Fg2n2fYIX!uNb5TDZ{&kth#ezlvgG>!bu{jO*oKB15FsRu{@&=1ab;+K>v818vE-x<9fT_?kFV0RJ$;)_C!gFTQaZ# 
zfkGhk%ns~AP-W#o@+%zR)t^H?=u`AG-!a{eqy%-4d$&!#zVOHzsc`z&e0pPIDj~p& z0+Ve70uSixMngkNy9{#obWS_IvBm#r%Cqh01*RDppTom`vem8`?^#k+A0f5jx)@}} zoX$pwY&pjoh$MAdO|{o;o(V@HM$P=Vmp6VoJXeoR{dmb8eh)@>cwAaPr+p<_pt90c zSlXea^?E4f?>kZxTD6j4nuMbg9lF_8@@BKqp5i>kPG+iSkAMF3X*UyHFg5grF~&n} zsWKrI?Y_tH+xzPv;S4rv*gxue5z8%J%AVIOF3g(#=}(WJA@bxnT3bV3N`5|vCHKS- zB0_C>S~#)?zz>BjpOr9X{g#S}hmJVr*pZP~O{+x}-b0@!F{xH>#<60sOvxj-{g1a+ zz)6MKW=m3g9xtPn5=;YgYPv23%thlu{6NWw>dKm*1u<6!h~{|CF6XXI=Xs)R;02S$ z9Thd22#}d{yUIYP43b}^c^weQC77?o^WZIo*{kOktI`4tvHqYucjo@JgLni1jF=6h zxBDg`iR(Tq?GoJ0HmPCmaFO7K z+=9CDgYpM)96DK@Hiy*40OK*LR{U4ck28pGP3Je!OnZy8wM7qfu${gt=UFx9{Ov^u zdOXFyzprhj1YTrlx*WEem-nECflQ+YWlgO_95|w$ntCU#%jcZ3HwSjCw*&PCgyf#S z0PcqhK^v}|lIJ$qjT;QX2Vrg@vG$Q}6(XmtfM++^BnWACt@){P=EV?D+I~<=F0bu{ zW%(gs&gY?XC@c3Zw2%_#$vN!ZP42^FnCuIBT)w$AG1fQjWu`>0f)Sam95n=T{|98) zP7gF&pEy8%x>G^&c~0}cRRY(-C!xBa=-{R_aJ(h*@@nrFZH5TCBFL9cvnLzYW*bf^ zbLL-<{Ak~_7*neA5$;e?(YH2l+rfQZ=CvpQ78TT&T;|23zV!k|T%}&@B z?fa9N$>XrmvuK?D_T+@FBwwULEvzkzN*gXCVfx*d8KidXijBHG0Y#$ENqJ`i^oplU z)`N)|E4B#8$U-ZAj-0>6L9nFoW{Ailyv_biJ`WiZHRrx`7dMjzE@i1CA( zra#_arN9SmfkU?#*OADg_yz+wZ3= zIF4IyOq)6a+ii3`u5_-4euR`hJm@r#nC^L(^g-XW-VyqyTf5**lexPN1U>~1XCTWb z3lQ?7lS^z5CGp@8(rc1JpSC9s=9e=K4a0<$I5sx+6NC5PGtLAu-c#L7YL(xoetw?H z?0h(UwJ4U(>vS9&hDHp^UzmJak&UYk-F1Tk-D{x&bE|#+Yf%V{8#>m&a7IgQs9+oIHI0b%zq*LZdCX_&K9I zA<8Ew9XR3&Vc}7dX(DH!=GL7Ay(=_I=w0#p_l;q3ce$Zua#zgyL9eDF5WcIY2i#TW z&go1JDyqV8HkOEIsqWWtu@Twr1!CN1!(lV-cxKQ8&+898WLp?iin8D$H2mlLEYTMe z)0(~#in!XdfMfMMNzcNM&@v}$cg`lZum8~>&URnDar7gURVmW>hNgF@!c=pSP zPGX)XtCpK#$!UkcM%c^%GBbnNr+39LEXiMJy}4E{^oHmxAfjF=-lCp956u%C)!n0U z7B=Z~E}GJ?$D7Qi_m3GbBp??O2D?w7Yn!1&r}^+49-c~2kQ7*^gX8MBxHu&`94RTO zGAj)*dW``G2`J{+G+x8+d*#*sP6nN5NyAOhQreSL9e&Us;WM^g$aL@x_bw zc0UAGtI0P&fxBnZ%G1-616iH3D}Za((fi&n&-i4}v#* zHdkX~b>lHvr17!UTM&qT2gAU^fAo^q*QWq8@Mx_msbbyJhW>EHiMr1O`upo2FSkQ{ z0cWMk6NcAuJ$P6krPWvb-8tM-({l+6P6MHv|G=N}lD1=s|2o;Pg1xy%$$k{YzMy#0v}iadXMylVYZX*TLI z=z7$AWe;A4V9wTby;EMV84`Ht*8K0IY=S~Dh)dc(H2X#4ly*le>7j&~w~`iK52X3%rRR!~ z&ax2<_q{Ie-HhezW5|~@xGBxfZh3n>m?@toP`@#K-P9xiC6HY2SB`>lBKlAQFXLz< zpoU;#VHrA;4b!Sc5who+%!0WDTHZ_LMhFlfOd;euV=W;n`s|j+K}<_M#xZTE+(>0G zUQcAdQ+p%s_1SmzwD_MuB@uc&1Ec%C>e`9RSGQ~&2b@i~{}?T?e3<@oz1~`rsTGzm+;_(@<|^>mc~a9ov^HNSrP?VQV%evZuRy1v)VgjEM*V z$gC=&ZP0wQZW*!PFwMur#LQUF5BM(@fOJGJQ;Fi`OE_A^w652sO<%zrN72L%II0AJ zW}J=^O-+dqW`45fJ7clGoRbA#Ee6{3l>=H7ukjiag4wH*y6q<*eNnffffbbX^J9 zj1hHRSKy#D5HjZ+PTq~3VO~w@4eJ}YF0M_>6dWg$v>ItvMOl@R8b8RGKiqf+NO~P8$9kaw7jQ;^KvWkP zmlBam z5&1j%VF}fp1Vw^_EnD@QB%*^(6dXjTAVxW9TzVMU?LVr?f6oBufv`~Y?B(GCa`vBK zaKhKXh_ss9dno)4@YGxZo9_g$;i^0-LgfeC*m@(uPWFZ=M|X--FBwwRb<_N&SrdPK zGfM{Y8srv+fC)xjq`Sz^+ouMYEev;?R#5@S_=W)dnkr-m;JhEfT#rHvN=nKDh2*HH zs9rFBp=iViDA|0hFqR%%l9rZc4C;5qq6g0EfVvvG&O$>UbnDp^-8L+wEoYwHdmbP{ zKgcQfT->4_E#0=sHje8@>GbBkyH_vFs%X4##^QYA<_-V?9UCBJr|W4uCG<6YB1U%k@I6X(YrtXITzL!| zhcu1h;WZo!8z&L9YBsB>D|dLE3?FUy;ES{G%%-mu!Oi{qmipjYLZqj<4E}ODF#6!f zk5lhCa-zUvEBs&-`*0B#`>QnEH9P3D-+*Mu;)}Pz^^QlRrzGIk5{m^`|8WZ4);VFQ zl*;(s;(w>#1~no$3Y61e%Yr92&cdf z%-0AMY!s*z=(7-XF=fjFkt08F2FeF1LbNJPQK3U!%Lk*20hieb4BfERB0Kxk406C9 z0&Xh%Y*G?XS3tlu^v2~yDjSiN!|f*yA|lIfrfyr!VzuuZGL)mHc?xh~a1c2FxEUrG zCV;r1&V;wSyE`~C(q*usNyEXRJpbvyIem_l)yhy@tEE{+n6CTSrn7u=jT8C@fFia6 zp!J{c9CNnp6Or4dL)y4EBSm6D=-^uo+DR=o`+2Dq-ZqIRL zXSsvl-DNPp5ugsb0^(pd6D(x(zc?1n|L-`K9IQqNm7?Qyndm;&ey);Ko?5vSy<(h9 z03d9GM}qZkWR_%|&asQBlL_AeGgR>>dkvUYxo|M}T_p_??MEW*m7DwXXRyw*eiUNS ztk*-PmD4YO;{xP|qdOT$gm_ZSY-~b)CVTi2TQ{r%S(ueuh6*r|9Y^@-zLMh1FfC0i zc=Yc)ZhfCl%xe+O!}1p?pqCO!%&oAG1p&!86%{%jE^aq-g19)9>vm?)@ozEo5FPk> zn17c@15oM$L^I8A07KQ>Z68eUv`tF;{+-4!PJA(JFW(I8Y8nmdB@1S()! 
z8`JlIt~`BMkAO~Qee(cRpbp=}l5}0XzpaauK?lYL!M-e(Ym*k7lShSgA=Dg|5D@=X z-6|JO6sP6=G1oM?2WXb|rKP2Xjc>TxiW?Do`RB(ZqTia1f9nHe4qe>gT+1JUX&pfQ zD$XsO*j3x&=R|h_5NCd0J%q;FTld-tnFCf~Xr}S`_S^!Dh(!QeSXNgbEi5bskH9}X z_RA26NZ!~A0H{hSD)s^@_fd^zKVM1SX^->xgWN0Z4lywjZ^0{!Z|gh6s4N61wmP{R z`erxSWMqrGZjgSbO}snCEUS-k&2@(+D|wQOf3Gq9{=^TMGb0r2?AXoj7ag2a+I~8XE}&9g zB4%S_Q@E9zmnT*_wFVfgj6p>19z7%BjiQ_V7w-v1yd!}!wXy>N3cNWi2U=?yDy6ip zZ)@%XS;wJG!C`)X9pyeFxJ$js1SLVzq3WZ=#Wn!zl2uP$D0IUm`=pT!3$90R(?&91i5ciqlfk z($FB#hXCNhw4e4SQ9-zfUxZgs&UttXdoXffX_{27YfeV+puuo%$qP#@8oiNXXs1*SrU$UMJ@>1*4*41= zOBhSCJIZP(OR}|5;bF6=Nc5QNgQ97eUojLdyx4qfY00-fYg&Yd1tZ}0&QFsO8aL6| zmZ#8*BjFoC=gpz4aTO@&Y}=S96+XvUp7TygtQJ^<;24{IcAFU*}SnNp-YU3X_d(Q80dvUU1Kiw#E!Y>-pwKO|N^UJZJ-z&*eG@Fpd7Jq7c=*bh z@KJz20cTrEUcMVlz>JQM5A2Vnt@ZE*#2`Q&*Jn#=5;D?SD;enRH5Hq7dBCAkEXnIA zi=+L0Gi_xv@BB$jTT5Fqz`#aKTe5y~a(ww0Ob8k?NH3&y9SxZiQHpE7-2?=!nWq#7 zcS3&fyA17YIFvc*PVS7Axun)nBf;Hw%-O9}#M45;EzR3*IjtJ1tT$kDNN&3?2C*L7TuHjD2{zfNbK`*;T$>0WIQH zzJUpIb8&FtK?(hV(KMw#OKJIqRu7ysuek6eXGvPtVJQmAFj{@KDf8lc+edg?8=t(- znHehhu1P-7{bnpV;9}i#d)9{I2c!77!Tei;H>d+|leP$`=?dIUwHk)T^@7eZfRdGz zm=fcYM${C4Raz^_0orzk0A>S*uZk!rpaHbMAYke~gMdQ4K358WMXs!^`GKjh?bckK z#%z=v95~sMQGqkoF1NC!h0uSn?5ssTHTt4pKPd7QU~tIPSj z6~I~EULEWlv*DT!^80crz6UCf{OiYZA@c=ISx82z|iaMov#%8t0cH zLOT*9BPoQ07QNnKK`8k7iEDMeI1d>aEU*4YZ(ki1^%KRrC?FspOLt3)gmgCoN=kQ# zgdi=A0*SfMsLH9SP548b%c=tS)OXzzSk13T9ULb8JG$>A3J3_^V`4$( z=jF+`zZN#N;)vsoXrYL z%PM!H=Pj9!wl<6RdStcIC2NMe!bT10=;T@MkNFi9iI-r0sCA;Dty`we=wlX}3PhUJ z9E|8~Qepx#G{A`tEhtw_)p^LhQwdeoHEnm%6R+x;iiGg_`-?eZqOGs9?h~s6HXC$l zRWh)xtjI)Lzb*OAp1E5h;QCVkg$>+Lsa984x6S&|=oGFY00~>qongZ@OilmT8&;3% zC)hoD;rsIqT~MHape$Usx;~SlRuKenUg&T%Sn{=wPZbZ|%KdFGc42ABsSc^hYohHs zT;J_!JjQv(=V7^D zZ1wWeI&BMSkS2}nGBsM@7U~G^{`0P8(DeZ)XZsQBj*v4plQC|be_`4I5ymCxn1+}H`~R;iwf_B;ghPqgjhg*Pz1RyzzvO3n>HU(VaNtV^aw z)w4oQ6ciX38QY3a4UD9cznzcni{QBXZ&V6wxyq%SdsHwKmm9CI+w*uk)@k=Ii-4|< z2vovq?KxP~lgGD8FE$*>)ziCzF@r~tPI`H-1UYd00j`(ko+Kt+_SFCoDwxjuY=}4}4B0eDH<0L<DzyiWSA-q%|+rK<06Pp}Hl2_fFW?O(K2w?$yQYtw{;z=SlY9Bwv<|6q5ZSj0qTe^=V3jYSLFJ zUs7!k(E~TDmj?yqkg6AtcGRCLI?5`YGm9?gi_fSQy=hg7^o@Rz!8d{}Ytxlby5qUs^?lrM(wYxj&-l)t6mr;}%+)8c72-Dau|PUik#m*2jv>Pb#k5kUA5L8^ ziPJd&#MD*d!jTLAVO0C~Y;SUwg?B$`O$Iy`-r>Dy@Pnvu9w%-nPrUmMche4M?nLTE zt;ToJvkD>qHpb5T97x+JV@XQ-5YKsd)O4Cdwlf!Llh6vE!2sd%UEJD$4Z)d7{zL!kT{UqhX2LfXW zCu*F#cXtuBZftpl9UA2ndC&PPAhM8<+HAv9jyoFRIs4C|If#WbHRw4W5yysyE0^u- zq90kN6)z9oVEx_ns{GFmlAh&whZGt4?($i)loe|Lc z@cDHQTkq4o(OOve_8ZXO`ZS)K^4}ZF&%acTUcqmTsj;CZ!4SuUg?v!&Ce51P^|tD4Y}YN$$Ig@l@H^Pb=}2h$Bs;A{Z!m$!=uwH ztEyp%T2{Ko7z8P37=DV>u9x!4#ZLqCQtjD}FMj#UXhn)%p@K3$-%0Mtz%6vlV%b+a zwwWU`(Y^iW)Ipe2bpxSVd1Up}uD6g1+u8?)P+SQ-n2#{dnrfBP+>^AzQYEH{Br>_g zBH06`DkoD}S4|Hi5_{6$^sk=Lzm+07I=Edc>iZYOLfP{=)de)jl{ilj>~$S5i@DjV z$p#nG3J3s0h%CzV!~AJ&&HfW^6uwaKA8;Bs@YzjP($F^G2Jt!d%l`9*bC~4H4Ai-oPWifmG9^ zbj?S4t;--_z}i8>mGSwLMpExuk^kCX{GQUE^r{~({zJ;B>W()Es4(!|r&DjqEeS)s zAg(7y)so50PmZhciE?**Kc?3D3Lx^1ruC0(Q^W&wHu!ELe(E+lN}2R`W4cWQ13@o* z;qS~seg6)$WJdzG_{})2t0N7PU(ZI5k;kspojafcH+DAYww1m%<8*c^{8#9AUS_6j z$eq)L9@r1PC8VUxs3)3mRN(j+ z-btPR6_hp|ynn($>EeFQQEejKMG0()dKGRpyPQ+LeO zd!rn)$d~Dj+@2yD;lNzU>b>T5Y(BpNeSMM~D4X$%UXa^#BM#LGWtKfRx34@xmIFrp zJd6Sg8?KO7_ZT$An8NV7G6v`vFY^H#s-Xjc_LjhG1Rg?NL&J#R^T3YWj7yhS3+J@M zxVHd+$9i~g`TBj>R!Z;$-PK+D0!}C;ZTQ!(^rihbi!(g8xL8ra06XD1lV@P*t={m) z4(>b}Fb*2!Re&$9@_##G=G?x}ZyKz!6nPRt3*4j7`bVf&_cTaINP31EapMs|Wa$Cq zU|W&aGk*cb!7+>JC~cnYE_VHDJJz(bZmQg3tPol7`s*~S8~f`Ma2SNdqv0J^o-Fg< zTgX}Ojx^Ew2Hx~&-B=~!^*xi{81;oX`<-AkO;G6@P?g z_@WZeGU=uh+`tbO9ueWXV(;*3*A>5*DiANgt>16byB|-3Bin1>DImgAjXa$i+p8$l 
zzuW?(#j=-_#Y9u^=&2mW#l@S83-QIJw70*MqFP#{fxj$QCtp`ei&_{peT!Yze68j@ z6=#T5_uRa^H@!b$AZ8&1{Z8A9N3-u9!+Bi}u+*p}sIRUlOI5u1SYLRt=^ZIE0hK%nGUl| zlH)$|ob_pl!h5oCaGUyDXN%Pcim+gN{{M`2{C_u1-`Jc*MBN#&AUl8Zg&QvIQhG)- z^pXdHWYOLyi|K4=^*pu<&hO-YGg+uMmT+36IV6J?+B5Qj=?9M-8pdFOE3fzw8kkQq zhr#m?GX|4p{Ms9?4TQ)4yXS(NfDBGnkB-_EnNN2=Xfl$)qMYopNW=}pm33TrL0V@H zx^~PF7>ZNID&4q0%1Ee+x-a+l0pte;6NTEoC$2{?$>#Yo@Yj=w1n1K}ez@j6XzWVY z4;mbkV8WRqiCaCLL94r&d?;SF==r>y0*6y*15XHJ;p%fT z*77sU@X5FT95l5$rZT*Ojx%8FSwT71S9FGCjSVAox@hZv{!F(3uU3#%=Gh z2fezi!Pz#+iM2>*(B zF}u8c2dG}9{rgy=srcXr;7@Xk3#C8k2H$aNrik2bQrP&VoHITh_TgWDu(wjxo7(6; zUQs>1Jc9!)06^1G0PuhT*&vg+e&oaJW5P_vwhr!aYk;_dLW9P~ef-K_J`~)(K$D`C zjGq7H+R-IHm*fV-G)w+smLwY+m!h>ixgs9bdn%TwuEIPdh(>1ew3BCMQ0Fp=E~yTP z$-3&bOky_VFfy+8Oigfv{7VfKBH?G^e`CL<>_|GTtU$cgLjK`scLMf zl~Q0=KJxkK$o;VI0u*Jp7ql**%EgyGwS=n|(Iazg;XJ=>O-Q2w5j<6ERFkZcT z(0XbhAWBLusGa=|1kP9viRy_B#!CIE_M-hFKg@!P87P2vP~LR4yrg=5cR9&w=R<}D zhxc7wCGsf66k$*F67l0ZKhh*ovb(1}rT#;do!15J?K>{;*q`9;YYY3_uBL}GP|%?_ zRd@`|B+%H6#_}}>@zWR*az1zRFEXqEBuOvuQkd2^K;-IpRR}N?dD_1S+v{Lh4>@qX zWdj{MAwh3p-?=K>1gDpvnF1PHxH%Pw9vHYJf!h>eR2Uk^x5B~Ye@e3<tB!ljMl@MJG@d$y)AS#@Hk zFR~u=6`Qdf0-;WNEH9%2YxudmUJXa|?6xFU7(N(G>2ITW*i0#i4Gf*o-aqjV8=`k$ zY_u$Ewe+mh)M`+if9=MlB{!#94$#$(|wlJJ;3$v*157Zw9pG|HF77?Q1oz8VH60nP#_1pXFuZa}B@FTSi zKI@0fkEp+RUfcx2XU3J`9H_y_>Tyd@o2UeVBFn!P=I&k;i2{gZS8f9mJ(jDpOTLW3{kD{p zl)Ms<^O!*G6M>eR$$eE+(?R18N5O}7H7U#w(2{_BXJ(7aw_LSf)CR3XPSPwpE0q5?_ z;iu;W5Zrf?Z0z2q2NH?$^0J6~zamX!?+ygu$(rH46lx66YV6+XWOhXd^T#Nrqr>%P zY{NT>>XR0MAh-n`jnzQW{*U6eaQHcCtgtu<(ghM@Zq{d5uq zeHKNHl&~Y`1FYv#b_P$O<>~qpi-AH)Cc6mDCZP8MZpoqz zp`x@;86AsDhK@EZSr0tx^Oo{{kld!hms)+ob?-;*5j z-y=_m{hClRtPw>+Y_)6-aIpRMcBneTf(J#HyJs>!IzuI%y?)^h7^n|Gst4vvbBTlC zu?DC}@bdo>?BlSx|)r(Scq1Zp3p@d0g*E!I5QFH~*W5R7Y zOz1N_IZeGeGLJ0)L7f6lgs7L9!;cx5U1|S0!cpqO&D8P$;S0XZLTLoC zEwAZNRE)!+l zArn)!*QYxSzLI%ksDu1do6Uxr8%BtR&EsRAuqJ~h%OvUFd28dM=ot1!Ps*nWFflUf za{fF}K#d=Y7WBMXaf6ULvrt>5%5#q!7?!bcaO?#Jj%eE`|EWWa$JiGv1bvLFCi^_E zo_!s?PcW8U*EKB>(%Om|m3yd)hYYBJWobOT=bX(U5zAKqw@-BSQRwH}l;{D#?4JeN zBb8}wEy@{_v`#>+6g0@i#ZLYN0xaRY?N=Z3-nr;nY= zcUT{%;j{O6au)}8o&hc|uLr0dr050_H36t}_i*r+y#UZGV-Ip4=0Y2pDWm);HPcNW zdWvkGJU`t_k%`M%qZFfR18B~c7msd8!;XdLkD;JY(q$6z?QLQRv94d(APFf(8$hk z8nDmR9p(_2(jCzkAbpmPxAo+AVNSu4?j7y`Lnou(pFYUHFCu_GW@cdMJ-y5@W@dn06?+9J>-$Z*tgf{8(Rr;KhmJP?xlDNAK;HRZr_09^P+yNkJL{E^C0x zhO!E~UC9=s1ByUnFM0<$!}*u5ctjO5J|rY`L}f9iXnGnp*Jv{Hv4 zLb++_89W~W#lgEYi&P$dH_gnD2>t%9Nfja8Rp)yM8wx~LO_qvh2 z_*G#58YT@XGOV1RnFuJeXM;i=T-;n3A8#Azn`mhfZWRwIb`wB@J@yYXKCL1)na>G{ zi1JLzInqWrsc01T@*YTSM|A7_sIM1ua&kfsODj|10bTpd>&dq3Qks>MLrIg}0|pIuFOr)g2x=aBs=2(h)M8|r4(hi7Lulfpe1(UBfGSEsLBX5OzSygG=JR<)cUA5$wmfKOvfds0 z`Ey_IcLJ^WVwZsHYMlb())nQrp-Uegm}#T0ZQV?lqu>|1Kdl*ZVdMw)X82b%zvk!U zQe5d2%-H~<%<1T3xv}EG<*156?=#*@dVPuM9Jm9};4~S^nnsh8Q>EqQ z?bFkyF<)*!FHZ(;JrJz|BE#-q8FJaEi0~|qI5muw6d-o9f^v~tB)TP2%D{2UKWTQL zN%P2Y^n;Nd0Wo28wVp@h9b#yy;Ed-30CuqI2us(RsHbI#C++Wx1~9(2F*Fv4AG{ox z3e2k1D#W&(Z)0lwmN;gdu9Ine@9j*qS{XMH23xJhT~I*n+~ws)@(mJmSIGP7MEBhn zd=6HAe>@rz5EA`JOdl33%S%!@P(dETHOYYfiQiUG)Ant$giDRW?FiUnj#;3TVtNJ< zn?YYJ$Fl+o8dftAkpJs3l1P6+Dv2dGKNP7oe$2&aYjHXIQYhxVZFIJo$(t8$>pKy0b+! 
zR%T zRsXJ+1PuN3hnLvVeDj%6RY3D*#%7r@L9t{TiN-^mjwQ6>QDa|dDodqiwYcsv`!Jdq@ zPJIp1qGwGB*;NJs*2GRRA3mumDA&KDp8!i4JY}3`_FTo&n-Kl#*DOg^nwz}8mW+0mGJGC4h5p6Np0c|+;416o6j#vo|m~8Nlq**7M z8_o<5EmrH`1MjjOS-WXzYnT7ND%~O&A06Fk{lxp@!q2X%?MD64kLcAwkMB*`x?|oN zS4mtn`o4?ij(9~(B31RThGA(HUFXM@-|Bhn1})>_oo~Hbvggu7@Io5K%ZVT!;su%| zx_FT2&=8~bU-D%y&)#OgrxOzF-=wSMxeV@7|D?p} z8k5t}-_}MLg*P5)FJrjQ_P8_|dH9}p1sOnUghi$$w(K3LA@^xi$m5I_Eph^@LTYLp z44kg@y|B=zsi}ANsX8u#1Y&%1Vajh3C*r9m*DV)G{=A%4?8s(QHHn5PH~pM)R;$|{ zAG+1t_p6k*6!}Rs5F2uhlQJ+ggYNob!XCJ~X0|ULPIz8N$c4|l@4r+PnyDgYcRe5a zi=q0*^=uL&VqyZ3XL7f|MiQ*hEr`t2t2=tH&yy_+(J% z-N&k`R*PY72F*5NGsU0OPhFi9l?b7|WcYW&K>VC?silI*t3~uXF`*ER!6|?rf9S@> zcF=TKJ@B00%GR%y>v{>`DnAaSs~J@PAUh}JZEGX>BJ936YDE)Y^!8_GXXuuMFW>&Y z&3hU_9x)Rbj9CBPmqqiFuhR|thC|fpR@+VO;HI~f$}iQs6RlsVk69Vx=w9M}QUHS* z6FrK7DMOs5R4jg_bgmwSrJpWrZ6lqTdCgCluPQxQvsFdDdm?Z}&&0np{$$kU^SB~B zJERMO!x!LGGfYMp3@z`I%U^l9Ek)_o&HkNFm~xVr$4L>3r?$2R3+8>wqa@7fH0tpx z(P2qlr^$%xGCcIql+3~}{QmWx))zZSz;iLo17`IcFFeDZ?M1?fF_-%5Zq05iD!ae> zJM#@WPxNBgp9j!J1HKmTXeydyD3HIXs}{>uOYR-2z$plI+5KfjcRspJFwIHNL(q)i zcl{L#gNd6DwvphHUon8)mX%S>%ImVLmP*#sA3S|-_9dK2xWOMUthywl#{bU@5dk!I zVhx!W-q=W^2QJZAMFzKQ&TofGZvV_$fCOALn?cA$odi)`0J9@8gPu>GAl~bIfP3p! zlJ)v1HA#p`-9;2Jl~L7|t@_hCD3o2JP@)%2dMRQl&+Hq^;rj0%2ToZqnDrpg(-6+b zcVl+;ditL2SS4cW?r$eCAe;J zH;j~UF1%}gSM-t>A0yOtT{i;3t3cZaI{r2$z2|6Ggd0mjT>Q0f4*|=7!H)z)u-kYl z*c(Yk@zh`mDRK|m@F-EgwyuXdA6Rry`EN~-x>@SgdCFlPnfJ8NQXC-)W4%5O-nUF} z;lHF1aAkIK8cVkk?*=QP<8i#*MBV+2x#gWg(_TW82pF-7LF%G3;9@w{3l_Gcbnc%w z?>7C=loIg?+c9+za?G?r3OyJB&1WQxJjMs*vR*lymeH*XI`z3ok#b^9O-}9{AAeh( z-TkzR*RhBt+> zG&p8f6yC;!&7x5Kk`K;$R%nWgiwpHDKMP8{(}(+_cpWrWHhZbxxh$YvF_JXcJo_!d zSC4hU1_5_w|95H?fBrc_<#8Hlouh0z>aAWNGP66+s|;nZe%7ngeXg|c&+XkW@}|70 z$uYc%GV;l3mseoh)71LRnnuAi>#S! zQ1|mW%;7ks-yDl^YLmz$44?K$Ywu2b>{>NV!_$h_SS+?^(s`*u$J!-BW3 z0%m)#3!amBC#>H6IImwtxQ$C?QR$}NNRNK<`iM0oCFXfRi(ajZC^XjZG5j~M@)J>z z3`D0NBOG@0n@;DhVUg-J&eyQVZpll{)XRL?c?O4%Z|Md;ITq^9TJN5oj`k0`(j(Xc zylVc)Q+McLT&y~2$>PQmdd|SfX#R3<;#&gq)xBpLZOKDA#1YC28JhM$H`(1K>$gKX zV69g>s#iZ0}eUqxR%n_Y~ndyW3Bf-sZw;9%WThPX!93A4H>pt+tHBP$?xpR zoL-pQJ%@l+e+$QS@}^*ta+S=XFk&vsY*J{i5lUe=N9Q47k8`4uE?1m}TzK-pE|sFS z2d9>tcG)anN=#FR*#ya^ee(gudq1Q3f z{-zlzVRvZpM88{I%Az)yH8m7k@QbYyro2_4UCsf*M?=ytE$~QGFITc#aGVWS!R{1`>pav5c6p`)SB<#x~ZUm!sy13d`kf-^D__Dx9X?S+%mR z>>kOJa!;7Mqv}W8)bGZj{rQcAn2P{QH0I{{FQ64sW8=dw-3Is@C;&?n3RIK_SH|S9 z@^|D*nw3R2wM#LsoM5*{$n@OCUsxlVja`B$ z^F`q_9Wk^Q?V7@hT*4Ys?UYRbHb9GM>Z#uB2JVF5^*S~l7y~7;B=rk#Ho@xOV>DJK zA`wb=YF1WOxpH#ADuOS_E`#ybk#nXc5>kOQue{1Ips{af2iXp#be%F~&YOsUi7s zi(t=p_1FzzTF=iPAD%-A2$YddQ&G{k7fS*6Q5rI>a9WT{r#11e%V5=>37l9_W@IE? 
zkg~$fR!y{Cs1~asO>aRB#m}&79alnFg@)RPLkJ!L3?mp(Usi{%zG|o7N=22Imm^%Z z%6PvFJcr9!KOax%_1g`VLUZdv)cSM67f^N-Ae}!BNNsg(H`=|Y6o@EY*q<)*^{%5w zhk<*|wh!NoW=+$Z55Ar>*?%hV0HvNY#>Ga)>g`ZTq-Gx7$3Aft0jE?&r9O?chgjRA zWlK~S(>qot>k{NrmLCuQe#liqh>;ZX~U zvAux;< z-Ezy*q>HO_hBL3gShOjBpoZiiN^knioGwaD+!ujoWKinDIq+F9(Q9s0~*j)y@ zrAIIg1;IvrOR5Iodd0uLjc{p3Q~yS-X_a{5!QnSlQl|A{7JhGb4|A^B@pg`76)yP> z$8F4}XpmJCf#Aw-Aq_be^*LahaXS4BGF-Yb%ri<33FZv&tIzE_T8S6v35x#9RZPys ze8DAlykQIWvMn$`=zvQBK|JWsT2;NZKSd7i;sxx)7;}x8aQGNVr&;gmXuDi&0${fL z)nsXK!)(@_LS1Yi((`m{-vX@$HM9R9q`5uh+--YoN*3$mqO3{D4~3LaN!x8BkfswM}d#c+DQJ>SVjf5 zXJzQw)G8YJS?!xMs-hZw8R&pDDVn`QEn?-2#1y~ylaBQcR%XKV9i;8N$2?JS{O8YI z2qM;N4#*JHXASom$FsKKau(A{dqkF(l!)#cXudm#oaBq7I$~o;_eie;##!0TX8E6MS{D#gfHiwd;j&XwX-5S zduQgIc}AXj6RiS}K|v%$gn)oRk&~5FgMfgdgMffMgNFrwGD`605&VSUEUW7V0f9R7 z&ja#1BPtOD1b%_9hK{>~x227}1q6h3aJ=tG!g*Wc{xbHgP#erMGs=GVWA>Qe<^4T# zUX_3!nl<_3G`5_oDVym}oc=eYve)X0&No*0ir*fuNS2_GAKuxZ>6147oxpd$(bm@E z^UIMMmJ`8OD`aTV?I4JwcNft;)=bHUM@a1SmGihW{MDW$t(blL4$-2<~$1lrg)$(VNpYyL> z_XU38P$15+8G#gc zEsP}yQ>a8fEyge|Uyb^vOVXUBkw-)?Zkd8Vqky99w@46-YRW_jE-Og(gD;yuQCjG% zAR!cHpq&AA-|vkz%FTf*g0-cB>1Q9={9Nf)1t+n_!oqrWcl8hP0y;~LxU4J4K3;i3 zAVj5|-dO<77W)I=FoJT^A_DR>KwVp6*iNUSIE^geXop$s{7ve@vQ0yZ#qHKl86RWa z*JEjvuR?`0g4w5Z~;?k0F~UewxutTPI|Kl^yg4T^YU<0t6Jt#d$9XgpR};f` zMq%&Pa@7+&hq({TJYx#e?zK2lHkUJh&s(%}UXT7hF;P-U?!(rqxUr8CTD6hA&;9fz zc}~vyKF_?%Hs~_wo%O1%8EQU`h!9PamPOMli&l+&*yM#AMfLk+cJa(U#TZQSUks$X z)aLMNKj$;WhB6_7cS}s>UJXS@dCsHpt=G>q@?P;#;?^%ON>)aV2+6M$m#-a;QL9=H z&m$5G)cP_Sumik%d(KGhJ|=0T?pp*y`iB4_{Lb+Cz`yJDrf0NLgovM3{qEb90Ytd? zkQNciR-ikjnoOX;IngCdJ?HM;tRyLR3GI+m^{>2w5f7mpjts%n+JY?+?&pmJxJ#>= zI+X9lE8fD8r#s`I^Azf?M-nR!pn!#(61B%oc+^=4=K5k^6KR}<;mPtf>LO$t>6p{t z#M7YxH5Xp@ugUrA9`r>uA04l~`TAl11>EN^2#MPme^^*7^Ju+Wv2?>Qodo!l79-Ml zj&i6!XGEKy!^c!*S1e*saV&N%Rv{Q5Zkp}Dc?2h zv*M;GDJ2{|$#{1N{jQ@{$)!m8I9&B?PwP8+)NGpx-z2SSkBl!LHBuoy ztA#RF{bJ)+q?Evqv8xb};6O-MQZ&AB@tNVd75P*qKiq2MsB+*r1PTO`cfKI&TI1Qt z+lf+Na40&=m6YfShHC_8$EiwbiFJ-e_ z++RtZkh5e+#)>6teldzxgmv(SWKYcxeqTlsJ=s#xDrx_!>NCEu?+ zdC+Yx2=Oj!FNgK0|8ii9Z3c&rqEL)Yu0$XSZt7m*?J;a_s$C}7Wf}XW8ztHdqE6rE z7B#Sm-r3eVV<^xXN~qaxuSiuHQj`wr;w!qvItW`G4P6GB*QWP*xg1Ho;WNU}(=_+| zoK$~?C0P4`6-dvM9=1Jfove|!qZ%8bt1>cZR&-K|`uS>ubd<2*vRwb-jwyz4mg*K{ zp4+PZ80G@rmn9CjSUrco& zhG&W6Mz4Jgc`Ob=aNCcP+Qhw!f+D$UZacg6;D%fc;~$8oy)j2tl!Q@FtuYSjOx^RLffWQkmDULVfB(l{o*{RY@doO-DArQ8SCW1(G#}hpB~RoiWPX z{Sxwnfg0IAU9{q-;R|kbz`_cw1r%3zLdK}@R)SHDHleO&9*U98y5)5YRw`cdDM8}H#SL5x4j`K zaZd?0ZHgy$Zc+@V5Vihwjn7XtiwDB{gE2iI43^)W$W`ius_alQRCM9@@={r)11Ytm z2^1DHV%>qB`aUpgubgJohE&z%BC&+7+njIG4VfR0o@Wy(QSJ=;>oj zolrJul8po1CK3jV20UEJb42dpb*6J*K_`AiB+!f^lMKh__B0=R5XdwHWQQeI_+r4V zjz4HxU>aEF6 zp8m-{BdbTwKQVlVRd@srdb3OyTsHl!`YC7_t_)P6Rlpa!>zYfopg}x9@PIymt|LAi z%A2e+!=8jt$AI>hcm_Uht9MO#=;&;?vZih{!vEE7j(QwXw41Gc!Kf%>8>2NRo zAvB?n%ec2hIj({r63{zIWYaVbDsj9t?>@Qv88ElXG2)pZR`;#suLTz*lnoJz3N_XZ z5p5aukNcK{HT_vKb(^@(VO6^Cgms3#Rc_W16mT*60KxSb8rk!>caOBa9iaZ|^hmjXXV9DWFi$_6b$`3xy(3;%wW!8|3tZdPi zj@AvUbmZDHv1?(~e%duqUOp)i@(pV=hmZ9wG(CEAyIl{Kb^p`~68tSR6lv=N!w|rCPT@_>l$$S&ehLg! 
zDoG%0ia7RDDTQ$s7JWwaQ?;)_ujl>(O?OuyIiJ~T#eMS1BjvgjCu~@>qwMcEt976t zoCFl9%1k~-8K_;P;mP}sl-YX?OL=}pB*~!DAFdb)OmeosgA@G0@k zfnLoyhvb#+`tRu08tq$?A)k{Z9zV$$wjY{R-}(}|6U)!aTVKR>FHsYb&!*Co&Ou)h z6+p-UaY#{@N?-2gIW9BI0X^`muRU-o5O_;>xE|7-G4Pkz@Lp#=vOTV;pNtmz&x_w{ zR!V=eTg^#LuqQSdVhj5!bICV<=Goleb}!fadZgt77(b^=@@i~Aj0-aGH# zgFDua?Co9M&}6?FY5g(HVOt_TGBTjgU92Uu8U;s1Xi7b)`rNeb&tKj}g5r4--=P^H zub2{aH1+p0#*4yw2-&o-S%11`5DuxCEFY-PP$$6|Y1U-zltx^`_tRu+`et+GAl^Zz z>x{FuoLM#!7xd|zJiR?b$SeupxgexLuaq_J{m=n;txTnh+E%-@{@e|AFL#JZ#$T89 z{?hCDrvPr!4ZHApDjU!>c^t(>{+e9sRgG`?tu<2&+aZ$PhL>ch(>sr+TS$rNT3+Hu>N|auyI;3Cv(%s~m%tG^fCwQG{k5cg8nb z8J2j9+3>0N?<-mi8Ym_gL;ew}%KN*4W3p~Qg;xZ73{bYdtt=EsSoBSe9({JE9R*W( zW+cJny#qhcEJWb(U;~B~zdqfz7iJ>=opwRg_@zO)wWlA}Q_kF~znSlVs3i9b1t7gc z;734ILdowhgYM}0RP>*v+5pIEC30ym+~;rgks-l;l&1=#6Y9EKx%X38F3V}a+$??^ zi3YNl{rz4zUx_~&FhAFwwrJIk1_~io8i&68m47DjPGq*|f`!zk9!bF|PYJK=M{ru( z60KheR)XA)LHf0=jwvfKs`?$(Xm~g8Eisj!KqF?%PT=h-1tjJsc9S2Ymj*}6+SMES@A}J4f}B(A3li%yTBUQ2{U0W`ox_^BbMiSOJ}4rk z89ruJEnt*(gLo%vTkJlQf9A$6$KX^iy5Z4`hiTx(Nq|$X^J>uM&4EipC*$hg^VVGo z_I2>y9G009pYPRM@e@f(};UB#UG7Jy`H!N>EODh4Z`Ifl`a}1kMM4ZAF?k_vPpPTw29du zI8kZV*BRQm5@nwWZJBox^EXC!U3fp=TD4m zDe1+g;fvpc2?bbSzRL-2QH!uHWTlY_+hM$36yTMMDmB9iBYX`T{)|b_u9LX7Qo_HQ zrJE0gAmTQHx&6kuk});pxhc(;pFhwEv)T^wV?)x^tGh*|+wm2N@J?n8yjtaY*Un^2 zcwJuxutl+Qs!D70eFRx}smjaxMu-G40;`hLL=Vtlu@L^}6av1O2pNE=ZfK*d;8SvzJybh{;B4qG~vcNWyc^~z3 zVrOYd9I$>I$8vOxs&e1Y6WQ7^?4B>S3UmoD5ABxFkBcE2C-q_>hMtBREqn`ZJq@n9 zg?Q-F)BXg<1@@b!SY}u(;jNm@lh39Hk_0qIEifkL=B|uKirg^x}Uxg zm9zSCWosXcI9b}r5N?sWtuBLk-)^B%&0|(M(eb3gNss2ufBwUt(7n0GT6ZSeXl^9$ z&z8!x*!S>;w2iY^3JpZ3Y?wJZnt=OiVMG*GNB!*AA>QEQ0oQc$HW55l_r8Zjr}E%Z z;U~dL;@DIL{>ZkLo*^kNgX^%i&tmqD=(XF=A)eb!2hSVEzCVV4FGQCI>> z;r55nFg(BP3u9MY7in4W%7d8pZkmN z;%<+KU!5$C?zoce>yYxbR5zG+V~NS75r}Bp_OwOliIl{nV81|io=0nr_pWomHa9`& zFZMZd652Y+iyT{Ghm~@=EoZ9mMr~UK8g$Te{s{?dP&9N)+Yx=S4^8l7+hY$1%qIG5 zYblxq;gD3HYfte$BB?BpQb#KBDRR=PN(d=;RxCFzm7Kf+mo z@2KAR@?w3~o^0OuQcNt|+k^CF?K*z1hC-o{tUcK{-6uq!j?#7xjP8j!bhSyeDm!5h z*|xo4oQugPbK^2}1{$cZ+2a05D?Rh1fZWdcl;(koR8F-l8aykHR654ithO%RV1BvZ zP~G$|=EGUb+S0qX!Ke9w88R%nM$bQTLgEtKek$8zvxN`R{+Kc|s^5(%tg<{j`n-xz zo2fW8pK;AA+i0im7Qy95lOaCQ+r_7wTSKPbt@ry{Ot=qi!ujJRxACvrOqtqGI{?if zDGd&t`A~zmtf5|04gyMLj(c#W8GfQujC-aMjqdGg8eyy-g*Kd4C_zyIztzAw7k2Lp3v70Lj>K)YZ@c|om0+9{2E-2Xbqekw z8M3i=5@nE-UaJ+PZq8yOjIFh45G4?;AmFb{NycN$;W85J^o>Y28}+jOB*O?~@q*Zo zwJQWjq^WFf{F(SP7(#6q1{YwcAO$)YC{(#_3hb<7w_pczr4>THUk36jp;je0Me6#C zO4gxwaCs8NsmDjG9Ivmx)><-N|2>)Q60yt)Q^IUpcYJY)LqmuW<7^^WZn4WrmxMsG zcTN88XCIu8m#DOZ%kJ6rGMP;~bInfW=!0L;&st4$IVHxUp;s%F)`IuK{u`;?toaZ_ zX5ZI^ZW^gh>o5}8#diri@z6wH`62jaN`+^F>V{?mqlfPv)qx!CNgj$n9)kg>0PGBC z(GKo=iMG?eEHA*INLkV<2ofv6|7Of9zPtlBHj+hZ7{{c(w97Bi);OROoZ^~}A%=z3 z8{G~7)}-YuAoOH7vwEi@_-yrK;787g>pViiBcIi6?my5N2jUo8PgU7^8sUs2$>FGW ziR&tt55{F$NjLw|3}yw!^s>B>@R|$7j`Kg^ujR~3RNtIF+I{2`PNjZ8E>ubB=#VdV`eo-dFsD7dh#Ef*|x6;d9m8$f@+N)(`dU zbWh&0q^EX?83XNHwe8HA`fW|pg|V|L0ba;%Vdc<~jk))iaUp)c_+7KC&jfFlwrAe8iLIulc2y?Xi$<2O9K(}0XZD-J_s&{6kyOdo z*SARG0;^hNecZImThV<$A2;CpPW{4Q4c(ucUCX46=!0zshgyyFEuU%fRZf2NN-Ng8r|O zw8AzV zM_|0!S2cb8$v-r#^?~P+XDiLIe4I08(}&Y0({XNNWVsnfSO2vEFid_N zwaNC}QtRi&KouGfyU&f}bb)Z_bL-~sE8rj&iD4aYaZ0KvdEcK>h3APByo#C{V-UB| zP6Q*1$=(%3G^)~njSP@CabXAw+U;d}A6ef!IAH&1SDIn49& zzGxR|+&RaY>~$n<)xU=k=J9YX8pQlpnlQr#&)qNSJuhVGJvX$s2LV}iLA#DDJx@E5 zksf~lr+enBjI6h#RK0rxHdbzPzoPX6R{u&yQvcWO$`=~z3l;4(qC;R3EKjB;hn~cfkcN z6U?pD{3R>`KT}eNWASdA-~z{o@p4C$FS|7tQvUmM;*8-^aNc*13O-I7xPi~3#eaQI ze@3H%Ggs`M`CtUfUyqSe} zfnm1RWIg95n=*9g5~}lYa7)SlUwnl*X7aXtOWb_nR3F)+RKN@Z&EJB>GYD`zD5V`Fx8@2LWgy?NhR 
zMW*;tfB%1CQ8{e3#PTg~3>q2vCr_~UQqBQaUKCv$w>O~hVnhj)&6@e5s{eRr~dKUNh)wK zChP60rl~o67R$1C$}_#Yxn8GWP9Ng@=i!{yY;jHL?X?@le&vk;Ly3-=8K-_%VYam9T2v@p+ zE{l4v2dY0z?)}J;l9T5odgUIex&HC>59j%Ko<`r*c}0BnqzX1@V9D8v`cT0Kh?c13 z!aV4hqOq~jnXWYH`}gqqV?}rG|NL^U$pDZ|Q&Tf6G%$gr+H|kJUs1%1veif=(WN-T z>ol|A?C->x-6in5#mDweEb9d@N`Td5Yz!$DugP+8{p~oKjArL9#956?Y4wjh81>5xK@LFrabLG`J0^|$aS*H?U@OMU;Bg=S_wjb?>ZMnk(^!;+4+=IZ zKR^Gd{$C(}JDP6&jiQZ(Z+5+t z0|m_F%-p5(Gw=OWy)uB45iF#Fvtjr|FZtBe9H)+CH|hQB?z%1KDgXmK-8o%vV7B(= z8>`$V*UfDt&3{1^#=^kB(7&ZK6R#Z@EOXJ@IY0CD=}h5F&^Zk+{jC(Q*zP!uh7O4= zD2{q!q}8$eMI#r04p@UDYH{6d4V1Rk*_s>_*j{A*CyujLiYAvw_Z`9=1Kf7?TpT+ z=HbN?dEHA=jx)BzHh=W*?d{S2PfbHrRhO{heHoozfA)4hU6O>LovdCp5;dQ<=L2uVayR!k{*a1LNNPmxwc5^ab$h`h4)(%_V|_mI_L)Fa%6_|7A+yCj_o;L`?VgOqzvGnL>Ho!5xdt%b zz~AP9E8q9Zk0haczbEo(V1L>5cB=JGo~b(}x9fH~$lK?b67=h8L@2;o?8s#~@LdL* zL~wkYLIR5<=0D|vxOf^Vy~$*~pkd%c1M0{o=zXN^dD<^ip!HB-?D1ddudo;MwY=x$ z3)Ya|#URzb5sIPQz@p0kPY?u;r&)?F((p&nwSA-+=kMvlJyDxPp5lw^?9^2}gV7TU z<;Ob{n8J~br}F}=S?c7>e?ue$6dXzs#SgcM5I6ziPTy6|$)2MD(!@d;D*&$lYfjwc zz3U~(^8Eq@OriGV+qeO=M^-Qt94$Zge_$6Tynj_FQ9@f;WB4$MFe21^xwB8=L$QzZUFc7np3pHB#H0T^s#eciB#VgCdV;uNfFI7+7QDuQW#KB@Q~2-! zA&it+CCAp-nmBjbc(SpyXtTB#1-DGWK#iio6Aad_8{Es9)yX6FUCjSksep$o2;Zr0 zvu|OHvF*4@VV$Jac9rvQz_P>J{pAPmv0MJ#~l)q3VN|t({!EB_WuG-`&Nlfr9w9e%^d>)g; zu$6>gZEUEW$GJbe>3>2D*nl_XXf=6FiQv!nz5{N&{4X=~oyYe729A)>dDNx?0Mv2= z{EhANyLI5@UTJgS0YmzJbRlu}m5`8-o@H(1`7q9El`QD{k#mQ|ET56-T^j;59Ua~O zEIKc5W=8cIi^#CC66g?Zg5MSakl~3-Ia1UBSUXL?C`>^|Nx5#)a` zH@h}Bj##F|wB6eK7T9S9dddj8ak*B_f8Q-~EGSU2+Fafq^7#?YhX)>ed02ZSoTnyAL2qm9vosr2ES@#Qk=?p>;RB4TcQ~~Kf$pgM z9Z&W=M-<=xI0!abvmfUBIkryEo$r|sv^8|S-ATle2^9@C5SUrHZT>o@WapCz6Q>Z~ zY>YM&*aQb65<-sq3G6sR#&mSVZuu0P6m_-hva;kIB|2`Q(3;6WZ$U?DU1?QwK}Wi( z&sfotSV>X1?<^DYXY^A}!zTdBmTPZ#^z5qcSb%RlWWtMR;TR@kvHE+v%1b#x-{+AF?5CY zX?hQj=DmA`gG*(R>?4)6`;|-3Ir7a*mE)Vwg;_ zadmJbOpG&W})HloyhSK&G2DUs43(ytBR7cPiQzfNiqhbi|9ui7_E!rCsE1PRr0%@x3C)!THYBy}{V1_>l{#DCC6*<>t21 zU60d_<2g4SMZvR3_fAJ7)hVXMHw2sk0TsI%;)Keu{5KW)eZU{NM_ zi}!;}jU7L*68sL|QU1Oc(`B+%zE_d~@0#D|9o}q!%9)&QwnY+r00#r8!SA4})~-TmT(uD^cQzl3EH}P*^WL z5fQM|Q!^()g5VgCaC`qsX14o48G%dyHgXuJtgp0)uvfqRi-l!MZmy!VG+}m`>ej3% zu*VcNEgd64G8+rML@QeMbu5}1gE`JgUdx=IRX{Ie$CFQ06Gm56HQQ1Vn?NeXjFWlp zHSK9=2$WJ}c6KY7ijNd~F5uMwDvn=RUIhDnrhpfV_H<{QGw;CdjL6{h`9@9(9yk?E z+n2(jIs@Cyo_l33oxCQe9zh`rbzQ+ZjvbW>BNwi&99729A9^>`RFP{ZDeYeWdVHQv+4Lr&rwyi*_jMl?zNulnHx1iV)pEfzG0w=cfdo;aV)D;Vzg zC4W}6bB#cX2%+bg%-;0!9DJEG>n&$DPf7>Z!C*s;{yJ_NoxX@%Kc^L%nt&s#wc3!$ zxq5!rFQOnyn-dq}#yCnXMv+X;WDlN31KFLKp2p^o1?(uQx36sLAX$MOUk@#s4%?(+qD6 zjeL5mxn1`c>@9q*W6LqXA8_h3h% zXS8I@D!D);?iZI!@~XClxp||@`m|)E$vjtl=>%vvsqi@(Rf=~Izk%-{3K#f}wtPF5 zU|=9PAa~#C(9gv@kPIIzie4b{ZRT=>3?36v%1%F|kPYYzrqvbcv0o7*5hAI=1~6V5 zoX>ik?KA2Sc1NzCV-HjIu8j%Hto1&F>y%%1)*!b0pkoifFZk2oSI5I-BjJV8-6|5a zXkRda(B{0R3vS*U)puJjSxoniTnrG)qHVHya~*q{wv^})oDz?{iSVDA&ooW1H14!B-gShh;qQKDt!OpS|kKRE%+3Jis;p!Rit zUi%o5oaaIwj9NceNO&bRI4R{?tXi4qF?}54Z!9v#r&TXgwCMXrOxZDAyW!{if%gvw z0{H6V1NA(JH1YIszTEu$#D_KF4AM2zZFi+{JDfgWU>C^C6?`1AY6Jc`&LaV6Dv@Aq zY+O?IJPbZQ?UZKhx^~Jb%uq55#ZnCZW;-pK!9s+gXB6sNnj)&K{CRLRCqJG@+|M-y zAnOgv9>Oc9btyr$J>j-JiXi0v(%vB&$Dl(&twN=|BN&9W@Hl=_u}dZG<=kQZ_6_ ztq__n?BL|Vk`X&zytuIN%^B6H};^n2X1K_j@qsKzVIPH zf2z^Bb^=|$!tvuVpd*H>E&>jPM{FwU%m!b$C)o9@FZFS8 z$cSE-rpj}WU@owg@8E?|c;@Yulo~U7+nrxP(6YQSvQ#01vK`KQ>9~0_EZFK6E!wNQRkblH>fVhIIvLdEF(CXW7 z?nC)G&&eCqoDNQN?khGHY3;Az+6UKDJB>REcw43nc)t#kZH|vL1mm!dx@e}QFXj# z3>G%_?vam;QKX~@@oK+T48A=B@`Q*V_8?X z;>M@<*RM`vHab7(SzKM=Qn9{nQcW0V5#sDeK3~0iwb}c#H!?x;XVPlf^AOh@)$%D# 
literal 0
HcmV?d00001

diff --git a/images/New-DistributedVol.png b/images/New-DistributedVol.png
new file mode 100644
index 0000000000000000000000000000000000000000..90f902857dcc00e00bd895692c89f26f11ae77ba
GIT binary patch
literal 54240
zLi#aCL7H5^i>2G=x!LqVp+9TF3!HYxSC0Q$Ezf>&#lRmc z!V-W~Zv~ApmMb4!)Qrx>)*t}bPMc86^riTi(d)rKl?>b4zr(XdKtD5TeQ&+7H~+DA zYwWAk&FX#<$o)(U=nwb>aKr(dE@-R^x>HV2R&jq{^{zBpd;5o^29^_o8`U>lN4pp{ zG@>m7S@@pbP_AIn*mv4)B`QR*#4=mIn?PZeOd4C~i6m{JR*1(g>zJV1v%hG7*_d~~ z1}rlAI^Do`wYP|JiJSAy4)bRmCr6b)RNFgc<0Gob@!8C*cVtcCW$L=8h>8dH#`h#T zRteR4B83VR2A`_fK>?^gB#f@KSNarIo{|!IIf~na^)hdqNX(bEkX` zziP?6tuQm~pywE)9XT~$7tbeqn{hK5r&()@E-##qo57^QK_d;YE62MA>JhXJ^HSE1 zwvYHj?(;LLKnXdZf4rA@*&zN72oC60%ucoyr;E;u4K&q`bNr=(q$D&`<0w>=;u+$F za)~tibET?GkI+MA6M%7L)e9!vI*q2L%6cV_nN1Au(e(Xpjy={xmdq&mmakU{ST}l& zT0x)_*Y&JIpHBt7SQG;ezP_jRtF`OFx2mA8xllVT1aj=1R{lmIcSTl6yx!inbPAzbU$;647ks z1Y`*gbi2E!1)r0*>-Qo^N=7FP=$Qnf?8oIH+51T7l)955TgV)3uDj1SBfUHdqwLb% z;oa{rr~K(#vp>Ma_*@WaeJR|=S``;lW2tH&rp_ZT{E}7($s|BTF<<8Mw79efrPU50XsZqB#fU2)_>*nqci#>|XNo8<{s z6GPa&xYz}S6qV4O9NFnO0vE58f~KKSICYLdG+)$7xl~PHIyyWaG=0z}1d=>F{5^lO zYTQW(%;D~T?*)j1`O(t68SlH9-j~NmVZcX;jvY^KBAT091RPF*&p8`Kr3wru3Gp8R z!(XGT_Ey)*?)!vS-ix3wXFZ3}$b*%fJtA*3>To;&cF2xpus|{=G9zk0n7q|xuTBp> z%j@oyGj+h<6WA2HiDhh@oT5hTl=%2m@%vir?uGbxQoFrOJ-$r|FPNG3WIX@1$jc-E zoCq|~?deANpB0Zr;;kYH_Yx<)SpDg3XFzl=FNs0OCv!6QJpQf{HSSpfB(=3^_h`oV zKZ5G-Z#O0$?|56i%hbsB^#cjW63RwXU_fSO-2;Wnnbh;YMgjE*QbfIEeD`i2jhWtg z@smY*ay! z)i?T+LUndMf?;gd2rHtTm2tOXvasOiwE+@mV%&Olga~nHF4F3SllX8Rf^AZ%%zCVe zk%U)6bhp`UCorK+6Cy7f&0GsKm z`E(B$2~U|Au1 z0xD>B$^xD6xA9jXqd)@-M28%y5)zDP8`P!B9p64?WYKEiZwDfxqmdX?b~l>ad8!>iq#ALWs{W z?9uzgyWG-TF7UOUn!_ADDlrkCv?v;O(Y3??NJ7(fP6oVk6$s#}VO2Hnt#bFc`#I}8 zk&wjV9CEI?+z^kyft_)-b3#874|h}m6bm_ww>*ncjh5@Pv!})!kMcGj7I5Xs^W}}D zUJlOK8{dq>X{V1EOzP+&Nqk)ZLqpFEQO-~WkicwL_O6@GJj6>fpo z-@?5zLGtS`dEkc@07Mwu_d0Cxbr|~VFrn9BWWcb<@YnNGcpdht@wi{{#f{ft&acA| z|CbW!$p8N?N|UvulW0M1wKqc?z>UZx4PT9vE~-At6a7>50*?haf;Ta*N5E93y_~`W zZE28%!NU`3c195-B4RgeMm9AyjYOXRh=8`SvjwXlBJzh%K%N2(?ef=#tTJ$F--}-l z7o`}QY>8ezHO1|+C7UlE{Pu^Gda=C>(9gl`{pgn=z&B`*p<4Et6Y>npOc;3zq_t^a zM}5KGjV&YGf`Xz!x0-b}5z=$k$LqZ*#r5@j20dMdOxoVbGsB6+f>!oswZs#7d8MDg zEY%szFxO6b$T-!H^7719wusAH!<)|5k-3S$l>X8_5${*iZ9lNFO%;k7)cyS^y~)SN zs4iP%IowfJT;ek#+o38Bkd^t}cFmXrHS8XS~Hf~$(;_^S?s#m9yY9# zYdPTJ;+ij4;T)=DR{+OSzf}tD$ji03ABuAn3)5njj^7`&S$y^e@yRh1M4drY6SJCk{cNr+FoRhaQje>A(AsJ;rD>#Q~S z$lNCC^wbSP`32a-?(NxjL+dwOS+Ms+HWt~_NuF#jl6BFs;(?V98NlO8MZJUc2RqEQoENfl< zr3(>wQM7-$LY_#YbZ-CP+l&H;zy148f%VP^X&l)K_0N&h_xFH6&-nnj_H$w}tC&)v zv=l_Ud}GEFd^ACsmQ~yO1J{5R(isB+j!%F5CqR#t!tu@_;5 z;K$q2MQR(>xLM>2dq$s_NEIV9o+koP)_QS0Z7SOdVgI(1*GR={7=8 zbs5U~_|(yVd9rAS!aq%EcMnQz25=&{?rL*yS)%GPDH;cGjPw@i^nmeEL95J=Eobqp zQ`5v8Jxk~IBG`2!lF_n*cKr9~_bfJKj1P^QL=0B`I?|{EMRD%?-ahL9+p4{C1YC0+ z)HH-+q+^+7FO&uWBjs84>Qhk_P0VMHDsSe}<5ZR$60VVACQH%M5)~~euVQEgBMS-g zokHTU;Lz(Ye#(SQX#Q<;rYijCp8c-j!UENDU30FyO5lICr0F;#N;F6(fWVRR0#EcA za_?b}eUFEcY<=?8S*S?8f5pi%KUgZARj%DW5d4&bei)MRan0_X2S25!2;s}ybG$;} ziHh_0i768AANWfCbic|95;6oD@(APH!xTN4i~mOK+K5m1jRrv$gwUhmyVT4WzH>L% z2-9ax7F1aym7@!;zK;i%nTb+5i4x1(EcUr0TV$49x95R@`@huVXg`K-|oGuR{Zd6Nq983MhaV zj~H%_D1<#^@2`N*4x+kDezTb@Gt1L6Gf_8Ec3A>GQif!il-!v=VjOddzi21rb#ue@ zS(+r_4Pq1(zy*7t;>@LYah5`b%Hi8!d?6b~XK_INX%YObQM^$TjGZX_ zb$xx3?XD?|&=omgV#gr7AyNt_;iM+c%6GH;NR*A1L4>nrn)c|Z691jNG&4jZ)Vts4 zWN0shQI7V!mC|F6CC1dD;Jfh$;Wk^k&4SZ-mn9U4|HY1w_dopCmz#i2XLkJtkX!;G zkQD$DxS0pnroAB?-APzf;>^t)Z;Bliz-o6_+~W7|Yv;}~R~ME4`sjKLryX7jXMfy& zQ9d5hfI(LxjTpcJHM4*#2=$XBJXmFv{~PJ};X~1W(fb4f zw&>lNBDgW3;+}vj)P;owfF}3tk=y&LnjIf>jkyZ#z?%sD?QLc@t3TY=X+!woN&hcB z93c1?x{)>&7UH6UNnE@?+*+8FxpA5NN|=qX-HKhJyB84qIoOl6?(`6~C6u4Q3wf&@ z-Vv3ROr||Sx%-i%#Kg?OMZh~*h2p{S0M`IH2q4onSE>N_sX!C>sNr0fzbE&IX1j+C zpgKQiKb7v=zXK!C|BB8EJRD*b^bz&s7S=IdsCe9>DEequ6va>I@PE0A4FbZxkxvvp zW68Pi#aB%q^b(#Lo5)Lm2Fp< 
zph(?qV%MLIr^$r}%dUs|o}z+p#XQW9|Dy~Vp|E|wiX_MIcy^4{`txg8a@b=0f44L8 zm|>#*X*e}R)m>5Tzaz2?BA6`_=2Owt&_=<+8U;Gnv^srU_KTR;dqc4#eWbhHx1*4|GqF6V1Efz^@A-Dqs$0SU)M|CVZ; z4SPH1NTEVsH`bPOYr)JcG5%{}xCAcpz2i1j=+L&_w_VTf7fji1cv;_J{Qw#CqWl$n zt6cJ|NeekbA5r}6&<{hmS@lzEe&;CJRP-!v#6ML%0@5bRRkF*duz=QUYLQ}bzf;Mb z3%)!}CukMS2yut^LDo@AS%K!o?hF?2Xd3s(ED#FE4iA_d>Dn}d|3A*&Ix5PpYa9kq zK|n&fOS(Zir5gsxp+ms|qy(fvQc^%#K)R$EN*VzP=}MZ=-v`@!~1=lzEoqHtrMZe8Gp;*YpiV_meAXbkMG95q>qNamdT31zo2ZmG>zIz z6&I1Z2qT5mFIb(779wfLdDW)hp{XkHU%e@w$oz6aL%i+%n8@ow;6}L(JAuiuRrzL> z^hm`;;=6Bb_YWWLTuzZjZiRMki`xkNIE=4t_iip&054SRVY?ESQS&xgRtJH-+Ut%j z0|=})D!>@0rikO>;=-e%zJiY9O8;9w;FdHI2f!ntYWc`K#K3sp>ev%Srlh7Or=|7& zSeXr!v!L)v9wES8C@jwkut}}J@2F!Jg(gY|Hux+Mi9#J-&W_^w1j!zyO;_|q?4jxgh z;PKy+F*B9Ub-(2cdoAG^7_+6l$p0{^&dO9^?y2L%B0}~37=!gmsOHFIr zl6*VdCfzATbHHinIxXOPuuG9caMQKRKU`@(j5avqj$6a;{QD0@w8Jxv|0PAl0=wG$ z8!;o-y7{h{eShaa{uKu-2Q^UUagtkK+0F$TeCIWRso<;V&GbF%T6w*%L`x=*n7=%L zE5*?l=~qx+U0v<3^zOM=Yy@MX^!4?nhcAA_m`ky;~jPeahk4@O$|H)MoxF?r=YhKN=RR8cnN__NwgT9JCI2@?w(>}7eg^D&0-1{@8 zs%GHL5cF=dEJ48k(&RiuyfZW0-+B2^^<&E^Ic}zJ{J}PS^+~7)O#;6U$LWyR$A+s% z%v^233)#H}t+%M+SFc{Y)P`<7&xrC(9gt<0pUzd!cRvXXBVF-+XysbX=KTw`zKUtX zf;^TF(=RyRd)I32&(=@!tVDt87&EW9mFrwB?SIjEf)G*Qj2-TrT+p+2sBPAJ;iy(n z{(O~v>xX=r66;UJbd{Y$i8CJizFWfA_qvk$$;+{63>j95WAP;qgdd$4UrVV)3ry;L0%Mb=B(CGA+y6$drT120{eex*#Pd$W*QqSAu0Q{XLSmGmeeZl0`r7`* zPQ!!K*Z{k$-f^+S5mPl_Le#5P1Ji9ySm{19snrZ$P2C^sp z$`v04hBWpVHbhUK>|S3#ZIP0T7(yl!kh(;S5--VU73}fk<_~pRBlXV#=VjgTRk7!u zl#+fLL-UZtc;+03eB$>}1NrIYx#dS4W-=~QhB>CMUSE7y=*d>Q*lP*O(|m%%8iCA* zh)Ub$3krm3TnD{qiT$Ke7*w5?0DG~TVyi3uQxse5-8k=S2WZ6AXpeS!n(DNv#Z0w< z&XyCc;^cTuvi|!h@|SLdMUb;DgU3yw{$@UU*SL}KHZp%rdO{luMVTg{%3bl!%<)56 z_$m71)f6=W-o`o#ywISf*ZF7>%cH*m*ur5c3)MvvVH}ab2zT%2S8=(%ZSa0*J|%XR z8|u?unx!s9Xlnkd2h@oS=d8?4tOpuxKIj`^{A+N)zzF{gzT=at zonHTUe7S$eFLQ5&6n{7Ri$V^vBM+b;o3AIf0P5As{t60$>`lwpgJ)}(W;p7=j`Wbi z2trn|=WCbFXcbQm39-RF*^&D3R0mcJzSngqnt-iSfNfwRq#$XLNEc6(08zn@qE#Gz zVIqAF-U48p$p;gV&0+ugUkDKzDJ2|mLKRbNaPs_rQ5&-^zOIpb@R*sobPYuw@;vjP z1ywYCb=CUHFz0R}6!m{S$qA{XrIzyh9J4|o|6KXb;`vq^q3-%pR~XCl7Yg}$0so}} zB+f)sRlk1yT7-nEdxh%McB2Y$%gMD+VMLDckppOH>_t@7${ds}5rl zG|$7Ro44h1@hCrJv|$cUOW`HMogr9D2A={xsg~6-Jy3QEW{BT#R)x_2H64&bOCXia zZE~CRGQSg$_qcr&a%dJouWXX9E)U+-xQ4=c3_F~2W3&f|*n&q!v|2sFR90~Pofa20 zV~rx-{@)oZ^&}`h$f1=}nznf5PoaHiYg_fIym?a3`o6ZbR`Am#bdnh)A*0L6h^YBN zi6rOI8mPgg0C_z&)kpE>SVm5Z;1{$kpXCFX!+-irx!&{)nOc8h0z;=D&k3uknZeOe zRt_jD%bS>)8H1HwDYvPpB9RngX2ura!IMMg<$Qd7FOU%busQspD4WtI?7KKxx6p4E zM&|!A3RKI&d4D9ahk?S-;X$kTa75BA%RsmCxy^|o7 z?SjJ3e{r&bC!iERMqr%sOKPc49+(^>w?A@r&RO7oQ5_@g(rf4iNFoJmq3r(J`Y&6? zu5P!@ZVM)%x{rmd6h?Vr^u4Pbme}{AqYrlH#acbo%M<((ll1IZBMMwn`S#uk4AcdUG@z@#{T5*pFm=yxqjPQsqF z^5)<w1cFoi?6xL7nOAPe1IpBIw|NxW?P`h zIcap?At$xk=jGG(isJZZdxyBECxyYrLU3ey-CJmlt+=$-bib4QGb%dH5igDDehwJh z_@(g|L8g9Q)89PzBklc=#rl7@i&a;LEB2qit4=paPi70*%@ADz`7srm6eJs)!6skd zEw?etg;3cPTfCBz%8qcWkeppYN=o>vn3zgVUAF0!t_0o>ELzgp2Zkl)PaBzD;#+N4 zS-!m(FFa})_@MGAei4mE{PILURN@C34yDf`8p@kcoBcpspd8hM2f2!)VBfE?r~ht` zR+ITtU{e^WeECyNU@!0)q*z@bocBppok9>4UTNL;NKRgU2^g%BhQ`oA`-cyA&(E*? zIzp(#9?QF5xb(GUNhJ7uM5+_GE4Pjt@QO%TL!+lF3>Tk@DjKB5vi-`Sww4ajbZq1# zvZS&XIr3m9oapms9;Lv&(a%m6_Qyvn-uk~q12OKR<3v`5@2O|oJ_RYNJEcAstJKVB zWU}(?$f|ZEhIJ~9E>6160K-(?*MrHpC!Cy+{y}VA)elhC*rDR^D}4XB))U9g+PZm5kf~+R1AsnRtDVL6^Hnt9u3t7+9vw*uZ-3`|)#G zmD`q{n>z-a`7YgJy9M>{gBDi8!1cZa(5kA_4!zeC&Tds?nve9E!BM>3s%6p95A8oR z{3x>ya4D3E9;?YWt#b}N} zm6ErS{3Gix;HK0m@SR;uQ0QbrA{Z`HymZv&6C}A59M9jX#6=^>6^_?gO9b4LV&nl?xT|^|4`R)t#n42{V zv!$hX%}o&Meh}_8d8SsH_7*KH4HtIU${B2S5giSw)1v!9#2r%(E)r>}lyU#_XY;u? 
zxfZD0u&}qz+rJth1!C7Jd2jf%)^1v3Nx8=N@|492)_?gmD|JobIgm@u%rKHkuU|DV zqTqPcC$9S%9afJqF;&*r>z7P`hOg|(IDEb0H|baRD$S~w?Ve-cP^LAP0sX|AuZrh2 zSamAfaNn63MM_;OCxsDO|Eg>v&#+A9p74YHIjDb6`_J%>pW*i;9$-!0kjJyr`grL@ zo;mcDQXU^`wOGpMW0iqUr$19gfiLbfaHlO-^gga&OtM7tm$<19+X(FerF;4{FY2ty zbU``3Nlr@V5jT>gZBTy=+FMGGzrOsmN}xSa_A+A{^!4ULlz-LBQ}}9@V1dI+4||#& zRZVNscuCjx^9bej?A9MFO1@O7S%-H7cJ;^iw?$CU760Z5rSItxiKziujKyJo950hzQk~Bj zinZAKQf8QH4N&v7^|0lbmClf?pR~mK2-EWAX zzdOQs`Q!#LI$<1A=opa3r~*0#5lZ{gz`)FpPs6vTtM}~4g0dqt#FQ#P>3=gQ=$drc z{}>P4%RZ&(PB@FBpjG9Em0Gk-?wG`z_Oy%4e~7UX)6ahE)@ZDm=3ZA<`DmmxD!&ya zNVQuUNQHfq*T++r0wruEW5Wxcrln&>Q)X1BqY94MaSCWf+0AY8JZ8tlq4e5i83x+5 z(@h$bkefTVGSFm9_Hpiu#?nPHP%^gDv-~blY*wPeq%qqQ4N<4+C=|*I|?U-ke+I``eeB-`;~?5w@Y7L=DpwdIs#?CuulRG=VY*v zljr!B>S0EgK__p&CNwbo4>du{dh&^_pY^>sZxL8}2t;#M#A2Zhb`08|*{k3zYKb5- zGVj|bsO@x&KkuW4gd7Y+lp78_X0LUBNKpbGQHdp3-lWecqe#x+Ko}S33UHd72Xkc zA{82p2jg`LhVkkM@kp;(c7LE&Bw{zPie0#q|JxLV!lI6vbDoLMdh(nEHuJE_$HN=H zmjVPjTzDi5Ig$laHjk;fflB7O!M2K z&TsF!+XI8rizxSIWrANaTG@7zMv#&%RkYpoPm-2E;C!a0QpFQ<$aXk^C78aVm4GB9 zU*Y1A#l}ABMU+KUD2}ltlYhWUK#$w6(Sfz!ogolC6k#7fQ;Kx8;Iry)GD4xePr%Mt zbd*+&kLRyQ`xk%oD`IjK6hMp$d#YBQHhAXuOz%%ZH*}%A1L)}zXdeR=SWRADES9zy z8uz%J#=y?im2O4B*bx_SRTI|ZhlWK4Pg@9Hh2cESAhxNgB6EvC?Tf>Xxo;R?`rA(WEcI}${_RS@BgTT#WA`I+p?Z42PlOK1 z^8xaMRtcGBb6@ZN1_d37+%#;&tZ$_TlF@seot&H&TZ-69A-M`gdij_)m9=s|uPz{8 ze~^NyE)QY*&5&|FLn09Lnv?3U`ViZgcde43k_q|1^CNJFa8eR-PD3sL%R4+++StI5 zkZ3Xf{sr0VC+84)<_6hxtdbcinZ$IW9I0HA0DfV1zG)HzB`i2t!O125wsSSu7Fv2z z)*dLEkiyjLh&8GXx&ul1EA`8N8zYrd_&JzVmma{^&w1eB%)>q4A2FtHXTKaZ6#em!U3vC0=eG=3 zBE?&JuJGR;p%zr;0TyIT0-VL$Hu;GKFJMV_Ca%it$sF|~eEd)N>j(Q8zjJC|8bVVW ztwS+3a0ef{fQEQdy2p=S2=g!S^Y8J;uG5_Tniw9O_8uDON_8cFGu^wCQCjkP@cG0e z5d)dmq_xnpW2(^j&QABI813c;T)xEmLaqh)0qv8ecY^LSs$_+2yf|+ALq}`wGf-D3 zi+`OCZlQ0dBr#lD;ml3?)^T$Qj-d0^XV?^b$k^Sj9)K61;X%&IdbsZF=6x25iudjN zLxsIxePio%Y?^~}q!<{%SD&v=KCJ#E7lMUfrzh)PycsBBba{)u$N#i&TzSEng^@9z z{dJQM2I|*t^$(J?G|>}D%O&pO}i3(Cj9qoDN6WKua&Gw_G+ThSmY+G7NG%WT0YY6YWX z>(1zaFG)Vm_Lrwl`!N`Kf)5xTeKsTg!twqJgF7+1#D@~`02EF@rdg*5gfag+Xe*XrbH-r`S?0VbJ4lwY26;o)4(K*E*}g}#a~-mTH+P# zf%#Zg9UoHATxGUg0wJVxqFZ7e5Kgh3n#+UIW0Hgzp)Am^(9dajmitl{e@5cy}-!&U5<` zd+^z6S@sFkU0jG_Q>D@RGUZs(NreIS9@Z#r*1X?(U^`C!(lJW+>b$>*F@Eu8)nH?8 z^%uOaFZOA;3rH-D1`wE{RZq(M=F_CC|)<;9}AOZ!zfMU7i2tJfr& z@?~d5Tj+I#aR$Xr#x5T}l(S+i5WSMH^45(^32E+`dImpm3_<6JX>`z+Cxy$gJ+bLmn%gv? 
z;pHv&)>2aQDbV@xo8LZTqmMbE(o$ZhuZtprh!}S8U1%vqgr68!YLNM9+@W?;0qvEY z-49HR;GKs-7j!o!M}(jrr^u{=>vx-=LVr~PL^V%avCZ&d%SL#BVEN^7RbE~z-~?x5 z&y+PeyDyXG7~Bri^i4-_tYk!I90MKWSrgneXXIXLs+avdd0+^Z6%Xvg16=X;bJ}}z zHcyXpY;7xGbhMuars>@EI5^IV%>`Xj0t9a`Cf&8KM(gX$Cmt2go#S6G>E_;lEt+wh zkt7kHa4B7JOC^J1(nLtd1|!qf-c689X8toj8?`xZRpsj~Jd`>4%Za%DM_)q64>&tE zC`PoH`fSjAZh42=_k)=@!z`nCa=_x}#rT>!gq4@yyTOMK=$tNhCV7{-QO9DFfnkt1 zohFxUB-zUWCqfsz?}xIqv|!vePl8A81@<>%m9hrLPxi8c$@nb~x=G@M{jT<|BKR6&2;X~~TWi%i8Bqx6T`VqCi zI9aT?@`+$0x&OiSXg(%yPTkLu~rWzBtNF^+<+yqEf$UT?k?+dJ;L?yC~^f0KT^ z^0*pEm}@OeT@~MMq-*v|t7=c#s4#`hmD{!GtSb-sIvrzv>2c$?5`Av|o@joH8y^O} zGCuR+OdjhOH@*WrjD&<3AH@p0Qu#$cistw3wQV_{S5iw|6GlWu@q9L;dB>=lb1-Ez zrA5sbY!fm6RArd(Ycmbnd{$F@0}&p^Fh-bjIli(clX6xZTU87z>&93FVb)(_3`40^YIp zoCt{v>qk9NycYc*gqyGH@FR)kt-2#vd=BIvyKjfLxB6NLzu~#77#q6y^_{u34LSoi zH|hGY9mK%kCLgD+e{jO;Y?tS=p;h}_qe2vgjZB+g6rDbS7y~h)3qi50Crx{9&C}@- z!T{4Hj5>-Ar=f`rUV5OkofU3<)1^@I{yknTCMja z?_-;Zh%#G3R9lDk8aax>qEiHdZwBcJCO^N zNFhq8{dlv=ppFA`wk~94f9bu;@SXLwdn=_Cphy`;>2veXvixbXP4!9F`je0S=F0_m$MH z4+tRNn69Av<=@5_Lxt5$=X8tMGW9t%v=hI1gZ9RIfyq~|qzUGsFB=^|UPNS!<(9CO zGx|xvRdCGn6j(JR;q!sF!3^ZqF0imZ>W+}byi`)GY>bl~jPSAwhd%}OVRu{c~7 z;gct>z=>b{>!Rzj@?a%5g@=tq;)@AFaW&~hRO{Q!d9J16-XV)`<2DD&HJ)!gzmlnu zB?k#HXLO2<*Hpf7XG-E04%AG(3XP_r={|*r9V|q*E&iyNqQ51{eBf?W=nfrTyqVnU zsi{xSM0J@S$cm+Wv5@+lRO?(xd9KGSvS;YFh1-`)n;1}PJoHm`!D_37A%%B|Fw<|F zvdIV&FcRMR1FkDG_dM^kAv_!HkkDOGOJ}Fc;UCxCboPdxceA)9LUki! ztTwL*-Z<(p{cd%CG;Y?{lgHagDt3h(iaXfb?&rV!?$^47GMAje1{~8$NPL?-(`%dZ zm+#SIoIQjx;xeDD9~4&!-(?vyS~*Qa6&54-HpAc#V&2d7-lUIkD$h+;!c2q$LQTgF zgrOWhF3O)_1PfH{t?F%y2_F8vV6gLHS(5(fx$@sK&AgHn-fiNKeP5Nl{rFJ2gTUV` z-l>to31M~N8rS6$Cq&XAKV=jmp?i+UW%FDq>s?7GL{-i@$jsw9ICe~9@#3jxxZvEI zz97a%ygjvHnITlam*1afc+xajBE((|$0kb=hzz!l58(N7f;prtCUw{3=lL-k`b%Ee zyYQ05bqA@H7Q@sH7#A=g^wUd%*(pk(h(DJmgxVXO0WMnmq)xCeSB>B3aMsJ8o(pU} z`>tT}CzjsSYZ)2A8MZ<0h&`Q6W^^EXSLU0*u-aXa~RvnGzpMgNX|^i*X_Jj&|l z1b~HHPb#hom;7RKuQ3S_zLLdy?6&MoR00O>rPwQU2~T45`dbcP!D`5)pCdM)P%oD9>qlGg9c^oNFNL zts}7)#{#x2O79)MC^ctt3GTS(OG|6sXxW+nOe}v`whp*fe32iHUvTG~&VnbXNe*UT z{W_Xj`$!WT&;5Rl-+q3y@!9PA*8;?M?l>C1d5RFXrU050M_@cK7m54(3gSXDS2<$$ zqv}+D(={&o=Hoa1x>vjHo3YuPV>0@A-R~$RUk)Ym0PcR=`1-4Dcm@WSH@LoW1U#qU zwV(f0N)q-|dNH;0*O*OqzZDXfpYXvJ(%a?D_yPQ1UMC-pQ@<(Xcby*dT^zm78M5}* zbxE(Fy7ksXi0g;|eWF($d-0Lu!7S^FOWv@oV3Y75d7XZ4nTPUw30zj#?nN>!?f%GG zITAXOQsr97Yp~kgngSrVae35-?wH};@gYw%YIK63cSgw2(pK(a5BGez zgCy13j`&v3rN@2ckO3@UDXbmumXm$9@b>$7<*5XD#)T6W&>sHQ4~*d73;dvVpkadM zQw&-I{owg5ns^|e#lf&CJG;oc@4rBA#Pd?5TqMI-9*q#coTOx1puwxx6yitQq-^G$ozihkFaziqooicix1ozGz_Ti!Tl30ncNS5LO)zPXl~^kD+YVXkGjz?^Gwt74J?8Z=8*t ziOMe@y9rZel|twRI)<)|4O*3j2%}NgLdc*nf~p2DO}yDSBtQZ z0`{8R&pO0)bf>G`la2Mg>1|p7@3Q^^cjt8FwzT4s#W252>q7GGBaL133>L%MuuKGt zU(=8E3s;bPxc!P$pT_0OsaD~~AU|LoG)`mIcDLiPKe4vlOydC_UP`x^QPCqLE7tsG zeNkjk=n!`2TON{89r?!5RiO^MDi<|ci9}L-XFoe7kqT)ERK77Fa~_lS?*4b1Hb8;B zlXHpyY`6^oEvY6D*`lL;?<1gr;5gPq3WJ2lS4MEpa4X`b{v5%GL3~m3p8iUZrd+;BV-XsMQ_kSo9DnDf zefPZK)L=#Lw9%P*kw23yXb#uD*`u}}eTNkAmcjVsbyA>lUE^3#)<*MZx-4j_ zm2=-#lRxQuMwF_ehmsjLylf#;!T}W{FK;5D^+kDu-`dYgd%;YfJTXx>4C$`>zcfrV z7xG*2@b>-MjS%EXQw>WQMJA<8&?jHeapy>4>zP{uR=!NDn6cZ8*&-#f)|Avp!H{`@ zJA+=Uz%FHG3x|U3-wtcO4$CTG6N4Y7An(-E%C$Dk9ouL97R)R4N<*?7Rcmo=qe;+D zA>D=y@XrfPZgD7gL5?G@BALC+%=1^q%_C9tx)9CC7Z}*sa^NZxaPQ6g`1s}8;lavI z6x6^CkKFtUGq75C7akL%U~9{Ab#2sDXh`r{HR9=1D|cQ+20zkI_E()Gcr1=LKX@y8 zUqwobC^F^-Ncw&k^O#MsQg1uX{>jVGol+8)YYt+yENU!i*kE#f)|QBkm(2``GucV*6%uVLaz|2~GR zl;7Rkc84yYveI|V=0XURyW0LbruL~`bc;1>ySXMn#aLb6${SwB+|r+VCy|xybpi{2 z_b%?H(SU>at9p1)fb@@7)8~pRRiD~VH|7}(5niH5O)dK1@ryJpNs=>9tA*ZrcE&qdON?C7suSvW0}?%ch5 
z+8l_fO5PwFN^S-f*_?h?c2ilkTXpn(QmOhT+gn>%t2e5X0t{|uZHPtFGWZzWn!>;|{5{}u*P zX*sr%BhrLgj0B{rTT|cTZH;#?uk@;B<&WXN|A?LSTb&ZI#sIHam@30Zx%TD+afsc3PoXF5b}Q9C_Z^B`2b3aS4!;Bm zSPk8Eshk^i0|V093^ApJJ+^4z?5OuTmv%7h*SW8ypOHZzs&rrm4wpf2)WK**$V1v# zZwZ<74cdOzM5Ejlbc}f~896gmA#T zdvkrI**7oE7(v|mwgNwO2rOcmZkXbwTN1%Q-8kt5>}3z?0Lw+F*DB&UX^nLn&eB+vp|9F{f8-wR6Y1hR|OuXsiE zGPV`2t38(cH&vD>8Cj;rM9{o7dGh0goul?w>1hXgLjG`0!3jG%qvRu*qRXK>_~{H_ zw)Jt%%cF*+4AOO9xWOUh*tb&qFK1^Emnu>XI!Qn0t`TUYLn>L_)TlSDKnK=`Hc>E+ zcX!etTSX`ArAG1iOG`^*3wHAKLUPbt_c}X8hr;&LA9ZZ%2uTB0pt6T=kh#i^Zeke} z8RK}#FbPU$O6SBAG1ne)c=@RguOjd}O+!?89+o(cFM6iq*9B?eMz0?N*t^L^2i<^ZeXM$Q$Q1-6E`*srrys*h&q}C1V z<`kviO)0Jw<|}$`CqC7UD!8QaF92qhv}Qd&Ulc)VShQe&IZv@E0sW}L#b&@ zJ~p&p>w{cwRnVc_4So3RHhJb_qu=vJnF)om@V5cdAd3bJ`ChKxJy`;L);==GhYu4Z z3RdJfx#OiA9QHIavbeNtu*UCgQ}!B(UU+WsB6W53uTf+fahcRw&&kOAgq=Py>Q$W9 zUrghW<-`~zHF{0-s7J}Z_IjNtZyzI#T(;XsRbEW@Xlf6)IFP~FNYlcVlw+dkYea~$ zvo9U(`F;k7s6g)vOasl*z*n&aq$Q|OTD_}TA3T2p~O!V6E3^{_5e(}dnoCV`+(uH4&A}V#g zCkhIfRB)%qVyG_H$*fY5@0CiGBvY4nb6-~-+QgC7ML~(M)#dUYE6O=ouaa-_8gV>1 zMtMXKRzz<%m2nD&9 zwAWx#3^cMk=kMSpJ7Z|za|x0;y0iJ{1*20&p!L@NA2wz_f)htxRX(4Xp#~qkI+Gwq zCq;0B34(vI(u{yc%D?)-2iu7RAE+z_igcM^oWyRy{|-BzGiBL$LGqK53cPfgJ7rn* z$#KEJxY@u23SLVDuXWR34_N&}CYZiNcEA1cBX#ZOY-x#%i%33`5R83KHdJo@i9Hq7 z(T+&Pqamv=A$C=B;{QHx+35M> z*LIu!qkm*JpQ~3Pg_-bjLS0_Y-f{vuK?cuN2$|P`LTPjB?!Y3UR%!SJkOG%(^{C5U zD<3lW=?)gs)<@?gV2R>2r9CVz>qWYM2Fuf?D>yu&p+;taxYK(`6Sj>4`&PL?m+DAz zr&@hcLGTm@VHz?}^n9XZr@IL5Xx5H&LDeSyoSP`Aa;KCpUBGg}^NE05S+2~OHTm};-G||7h@6Fcz`ZgsA^@zvToUthAaNWUJ#8t-$9`0e_TZ|9V{rw z6^|;TyrKH+)AI?v!B#sM8s%Gv5!`U+-AuSTez-=gp0Se#&;LRujR)oss|+`v(Bp(n z9$QY#)co-bQ&34 zV+%uz*5+|1s{EQS1CIcAe_aPaj1vFLN24gn*3}7nBw%P+e`^5Je?^1OoIOQuH;BY; zK)K%Ye>uG73!4OxG|T=%Dg+ArKakSMbM(|n;dQ-vP##z*F&yT_3)`9f4?mE+0qB;^ znEn-NVhpq`i8BvZO)l@rU&#U`pN)jri7g~?veCdz1z?{Re`stX9h~w*Na-|9P~Q`% zGcWH;q)c};&salPAW`C`6OfpS3G&2L0## zfiE;gR@ZON(7-y;u=x@-cZyo%Qs|R&ZfIgGbRkJnIH^hju_z~}f)wokCCpRdYC8ew z1^lzm#C^Bz02)EgL`h*_7l89KmQhe7G$c(Drw-?M89}0YlCZ8WnM$jZJO2)RIZ*-z z&ieakvnD4P*sQ__Jv4m3U(vx*MqKjNDe1TaxW~NEB3%N7vK`)H;;5Mt`LZ;?rrFO% z;sn1}zxFHc1l(v^eD@yD!_@#@>;L9gdLou-@2983I+8HODAK^`o`-S&VEC_YI55SM zheH??0eU3h(4kgbkL3UNGYIiFG@xWl=b~u7$_l+RXzyQ2{|o0hhk%cd4~Pd`M5+K* z7Gvnl^x9w|`(HCu2U*d$tSqyOqI&^iEn6%8QVvOy;`nhOrvIy}4r{u{PpU9;1$Ahm zOzajCZ|;YOf9M1%#s^dc=FJOZcbqMGA{QF=lNz3@82fchBq6U2m7YFdm>$N!Fs&5( z%v)Ze+_{)V+vS=`eM}kmnKzY~UNXceLS&LdLap?WuP}`ao>;qDrqAX^5WwfRaaRD&S65ke zYjGmQZ55HY7GA_jEX7I<*@Tp=d>(k%?-EwDW5H|D9YJ)exU)H4xHR6JdMqR(mv`4j zj?=IaIuXADoRy*i=lnOSoI1mC?^lSC5EXe&l;E%s%icft#ul*gA-cQ~AHyJ9Go9(>}$a)`d49VIiRi+UitW|lE0OloGzrz zlqyASs3kFmnJ-mszuYW&Ydwb&?)ThK?LG!wUYnWbNCO0oa;{DWA^C$^x$6FBvo_O4 zU{;8eKXkW?#G8&W;onSe{5Z{dNb_jS=Q29XrKPu$jHO7{z_fTPwU=5t3@65amD^Bj zy!8H!c&#pgqgUuZ7Ys^}bp&NYd`UVMQj)W- zc<#!~(B77K92E8C$Z;o(=;gCUKdycV;Lj$F(TncwuL}6;+WMX(){eq}m^= zWJV%23Kr|X*IY-(3LB5lcFJXBW${ZM#>>PYQU{E8i?qq{ zH8RaA{&0$VbAHujh}48_pb1|y-E5~r&F<)^Xqlroi(j#BUK)B2W{8px@wGVG)I18ytMQfHz2%-t@YwqqSk(tFpz3SRf@DR ztsQ3vo8Iy8$|1RuNAPBTb6 zL$)ETD{`X91&3H(0@IheOP^B$c#ocYUn-{V#&6$Y6OHaJ_I)&(K4`?>#FX!6a56ep zt=&o(I9*NjT2ddI^kRwxo+T`sFAgo|xBpVGJyo5(W687N(;bvAo`QyirX+Z{v zj*iZDz908{`mdl&R_AXLl@P?tPrHE#&+r&N9Y$3!w`sdN#JY2Jo#&4SrVT7t4$Miz zS#Kp$c~=fG3P_GEhp7+b`LiJZzAD;!>GGgyZr1P} zC0Ixt9Ud<3n3y5ZWs!d@&mE$IJMYekFuLfZ0vk+Z*U$MPVu}d+``14<%13`>fU0*CPw5r%wK?vH@O)e+L<;>k+vC+jD*WJN}#% z=pu%{AbRUl_B&B(PaSZ$E69rHYK7}Z=!xl#OM1H!?eHXaT+%`b>LrYrWD1t`J}J`M z5j0-@38hs{Md#3|72yDK*_tW~$DCHZ#d{ZS?Q%+%J63(_8wfn&mu`ZvIp?wnjopr` zX$6L&7Ae?t$vdDDJk9=&SDv<|$Dl_%(R2WKOGm_lt~)2mFgdPovNY&nAn=n^=Y3@> 
zMR@bVmv45HMu@J`f&KNLk^9qp9%zQ%2jf#~jKBr&3vPmqjU_Km_w64`-?m=^akPWO zIz|Oj>k;(y!;I{(is!pML1*2vxufRu%c)~h==zJ>@U$C$SyjJ<-|D+ECx0eCjoO-H zY%V8h#>B&ubfagcdnB=<6@t=C1@Q3a$bOw{$>fgG_L1uC?s@J`R~#JYo&jjI@KR-j z4P81TFEJSzS&W88FA(Ir9YYcOK-fln*>`f*05$j9+H#;;e{AOf0*4N$>zpt-|{>*`;cJ%$N zgz76C_kJZoH(!oKQ)$H(d_K5HaWfY*tGD=W;?l`uc} zN7)@HoDz{=c_>(@z-itxpPa1!aQhaPdW@S9{*BD~nv~Gc+gWi38U-aWWVgP)j=liG ze1F}Q{1?2=#H(gMk&cXCC$A9RS6~9Ez05Gm$xH4}bz86sI;WJx*r9$tH1>B3 z+jWhTn~J={Zea9&lV8utR^lECP}}wFG9LL5>Dgk5RwaL|Ms&etu;Bq-dpbj5709s# z-|3!K^uJl+09wtjcGFE{YFRZi_$`I3kmPe^J zECd1bKE2AAVYG2qrm}nYxrJ1ymIg?%xfw=lJYBr1{m3`DJ)XY?%sp~fhuI}nD?t5W z`PS>MHOAfkc=NB7*~5Kz&l_FTxA86sWcLLT@qX|QzLKYAGv7ew9FXx7Gc(PnldfmRVi9qfM7 z3i9{7jhRlvOM9(Xcy52@0^xSk{TeD4V8n}yiw!U>Ix_WFMl9puvwlYodFjmun{3f* z@qP>}ZE?5f;rkl@pyM-5>&V86wqUL^_nt=Ab?d^%D2Z|o2ZArh6-lz5aK`rVYQT3`{Gw5jF^8tDxxvM(gPIhpLM9zO_G*9D`4Qu?uX@wwt$NKFAr5{DtZ7`|guu^=q6-aSMy z^xWK>F}JXMNc(1N5y+*0!&cO>8dyW(-*! z#>;Q}Mc~@{czx@atM=@y0a{i6_pA7&OP2&B1skj1?c=uHt(N$0N??r@H>i%y{FMeeNov|2TR~u-wGi0L%(%Z!myJ)RLsUCV!QMW; zY?Wn7F>p9%H}C>au9q)gN?4caC@Cvl*ioolQGMOL<6Ybr&$oTy$9WE(Dvv+D>!|TY zpmCr<4bZ}BVAj>J*cSXN-VYRSIjqOal+ypR$=uv3_1mnbV}1C(W?FJ^ChLvA7 zznFYWw(6oJs0H-+STD%qz$3g8UtU@Y?DtNao8))XMk(v`?N6UReK>1=KLOZEl&~yP zVdIrzDJ~LHQa%J+PazQX^Czh7{$W)hv)$R*p+BxvZ#G}dz`)Sq>Eak7uC%Dj#l6jI zeq%?>^!U?aAXUB6rcS^F{o;-$N81UG8D~#Qm}Ce5M}S&^J5Pbzdk^QHKNObs`NoQ; z$AEi#^X}{@eEiH#dhs+U`&jSn)A>D7U~#Ylz5Rw+JAtl-DBk(!7_!DsxBoM*+pYF= U+u5X*KsPaXy85}Sb4q9e07%baBLDyZ literal 0 HcmV?d00001 diff --git a/images/New-ReplicatedVol.png b/images/New-ReplicatedVol.png new file mode 100644 index 0000000000000000000000000000000000000000..8b5e77f9429bda02f273a1cf042c46596d2111f8 GIT binary patch literal 53975 zcmZ^}30TbEA3ti}sVG88vQy2z6fyh0PqQVOrrDcm_ANwF)`*0xl_dL8$r36;mWq%f zNp>kq3n};0*YAIy``qW==c$?boO3?socI2Ezt7poW|GE^nlfs@fB|DEWIT7kfI)== z1`K>SVi?epeCk`{fC1y*TL>bH&Y@7p$_LDX;{N@b1p%qz%$8YD{45AWYA^)FsHIAi zRA&y-%Pl|?(65V&Q7hE)n15p+AP6`R1Ofi|VDKy`1_lB?z~~?t3M~0IUaFMq|2H8# z2n0+pR|E#n``0J8NR|KQ5nEM!ixX=z6Lo<&x`G3@TcQ8P*yJX&I!^y@7#JD^4}$&c zus99!f2}ccn_4FS*J=RPGyiXI{Vh-eK-~YR02WvOZ{H+(U`)6M#8opy9Ci$ggrMPt z|0d8&b^n`|4NMgX{x=CyV2PHY<6tB<2+YR8HNt3M7uZ-87D9%bBrXz1FI8B141v-i z)j_ocF%_e*foVcEm5Dbf2uu?;8e-xL;czpIi=G7`!Zm7v9zz1A;JG9y5RVlDr4slK zJD!hoad0dwLQJJI88S5;O|mjTYPz21;t;qF9Yx5X*l-vq9D@o62}CRcmrjyda2hI3 z42$MS^;9wuV+M<5DtrGCxFygoY0C@5zSl#8lxjeQ{hB*j5!XbN2$mR zs6a0!sL)OxmKCcc#o8E3Dy^S50^J^C$69DOh83bUgP3Zi045OFbx5Go#bkk^xk?q- zWj86J4M>I7fnbOL$`oJ%5+yO?^eA8yOoLX&7zAJfFdtD%M8Y&QC=3lkD)mCP6KF!X z^wBhpQVg--qBU}{SuN6HPz+FP9PlnjWK|O!aeSwff&o+y3uh^b4k*DwWN=_Y8(ya& z%S0%JDo(@G;K?kdO{SIO>|nf1V&fnjc01a@ATpqAmH_WG%j1v^5rs=vI9M19L>LRq zhKc6FiFOD_jIxudP6}15=hN|Ug5FA$KvXKH(4dReBV*(^2{A?;NL}}DcBgHK&^H14Qi#0tJMLmY>Y#~P&x$&tWgSu@qr!T zW3VK!7N_9EAdD~$AIugJm}~-CVWttxGQ3RbfNL27zDvuY!~mlmJQh`;RKvqj5G$OC zjzvRM1dtN};!5N;B8H~ofXA>HoGoHhSx)&0H=s_jFC?g!IVr8K?Ih|@m5VNqF*IJ95|5(^uvfT1RYjR zVS&LyD2)Hi5qP!=T{IrjY?T7P z5fYt(hag6W8?Cr-I}2#x0{@0s1z6$0!|)s}UO}^}(0UWzWj2r<&S*QCpozv{kODr% zj1xj37Mz6-tO|-_8o+R_jX^Ou7-kT}i9>M&HY|&$Ari@q=zil1m&jZ;Weg+Msz&1^ zat@dwGwCsLLX?U@bn10Brq;qD6Vd%jW_4((Lbis-)uW^?23G?#iC7ex93>&}RSb?z zYi5C+);JeL7$*bz%qXLo7i*B~QF<;{=_27pTrJ%p2iwdV3q^)!LrJO_yu}3vhBC2+ z7&DU{r{?KQaGP2c=Q7icFp}P7a1xCo8-XAbxa*8+>B%zAHEIby$r!bhtXaxl;2lL2UHAqG#>uKT9!hWaAkRgQ(JOrhWJDDW8B|17*VAgS+G^{j6Ov4*ug$9aC zX^oXwgt11o(ttNaGtdT?7zh@?yJ9`j1TYNC#4trng;1x9Wf@Eig2=%Z!~(>{a!j~b zl?KNZ$gEbQ!^~4k={TAS%;N)=4ZOwzOJOFEgD8MgnN%hMO~OcQG?)>H6GV~*6pb;F zX_jaWfewyA!OS)+3gjYULAsb2umVDt%E44IGMcQRnUOXg->3#&LW=NUh)xfWBS~Q{ z1VV16p`j#^kpSV7F%mJ+tdR2rDj+6fMMex*Clm{v61f#=)NoN&go+VoB3PJ!HpWtA 
z2nUi*H*jDKr4Wi1fQ&H;A0NIvg!B+hGu~Ef()&pzQ{`4TI3aN&Oy7p@k9|WFZG*wbS7SyiNiFky%a% zP9byAz$&@irG(gHPy&-k0wP=GPHwavA4f4Op-3!Sr)47h<2XeX!*jANpnkt?wb3ItRwv+1x@HBZ68D*t^ZZP42| z1c}7Xt7^E(_5CYayu?i(k-~wKgMjd3wH5E25@S}n1HIEI3bwnC9I1Wc@OLM5^og#u>> zX@v$AM9+^Ao54^3u8{34vsbR4B0(n(b|0)k|s$m0zCe2v8#jaZ;L zCfteVP*@x)(;4G*N-Yp7O=qEKTu!hU0b&3HAbPeuJXS@Q(qTM@K8DYeGnjE?sETK# ztBE8dlV_IEZAO=pAOVmcG1h8`gQL`Ju0m&yBlfTB#A7IMGMUS#%TyYxN-fbz;S{oo zZ8quAA_Bzb47bL@NJ=o5BXCK`6grw7tCAVSJTTH|qAL^(G2Ml8AV^xi6C91i((rVH zgBv5ES;L{OXm+$s4=kYufNj58(r9V{1VPg%WOxL!U$j~g17@+n_(B*4PgkJ8P8}Ug zm7?Q#cryfS1z<291D1g-N(?a8g;rBdD6pLgHKD0G0593;N;pR$)42FDlgun}7^yLF zY8KQ1EJWu(MX@%sTurt@>;??UWag0Qb^}fa<>JtAm=YVO#F?oWj)RLa;22T}#{?&v z_$m#U%G2UZI%fag*i2|YxCK~@l?m|1SQboz!yxP+nN|t6OUYPD9AALa3Ls1kk0LQj zA+Z9QkpRI!r6QgTgO^}%8WLEAFo7L>JlT#lXpB;xiR^-L4PuOiZxaGw1xr=+D_M*f z$!4Kp8i+)KfwD+QwZw(gTEJ3~mBYkQ;4&=49Ii+5AV3m8}aZ! zFNI41)WdQ~WME)btOCb!iX3u_4rbJd^h_M!^?{eg8m3gkj1`06GNhd&c4(nIL9~O4 zVBl0xtr`K1(JLgtB3P}#$}n*>6r{+8p<20M3kDOd!D5LT6J03)%Wx)Q|7>iy2xHX5 zVE6*0ONTXJ`bCYQg|pNIi^c8$n`Kmkh2i4spd>gG6HUZ1;y^@(#0ADHgyEK0GajY| zWbJgs$e?xrWMbnCQXa%cLV?h9gpSTqgDfnqhRjDBNFby{X~oN=7%P`+!ShjImW_>L z5c|1}$1`z&pI7OT0w_LC%+w?JE{YM|?<*ltnS{kuiK9hUrH-o;X+&bH1qLBo_(Gi_ zhNhN_^(L)@90!1VqFvw;IZ-YN!-kilK>cvXPN36hc9z`^)|-$x6CTBqsBIdK(1xJt zAtX9m4(JLtR!4Sn5hx4Jh^62Z5)xd@RmEX=7Kt1jZG|9AS|^$glaqibN3>{YIxdk3 z?oS*z0+z)Qiv|KK7kCN>xwsUEOan#npj5!^VwDI$G6J)LCS+(OB831XcEmDtF$@(4 z?Xn9@NJ}`1M>7!n#q5L_C2E!g##f*%;SLs2uXR{eID}9vFd#@& zzYpy1HR(}gycGwCnrLvLtWFh}z&B$1wLuXBz-&h}2#8%oQ>-*rBMIlk(bRed4aVmP zxzS<)tl!u26%rMZ>`?S4TV`TE{0nz508)rz92ab0aB<;8p^YzcAk`cJz&Bt~tcCA# z;!G$CS{lcq$n}D75RjsXAvhLP4;CtU43rT}b6BHMAfh==gbD{DI7+63sx%Y?DvnJ> z*+|^}#p3u{xlE;Sh!IE-*~XJ|9dd$6i+8HTHoL_phASogLs{W|V)bx|&w1eStl8db4;qV-sONUk)ZL#6~EQ%(? zvJ?nABpeN4;G=m$xej9jkqFT)gUZIhqhMkzm>@PH6=YI>h6#b%72%?AIYDy{~cSgRedM z^BXg7dgl=3AAHl6d-CyV%e~yUH^#TE`cTpZd=bvg25F{Dmz%ou|OUls~A@GF`Z}OA>)8azH~SLJ=<|9uN$J80&BqM>kA5 z^v3VZ)iIQ;-u5Cm-mV_hnzZ@fJZqVksm-tDvH>GYdmX)n!m|LI+dD0{Sxb8H7t2mb z8h?cYDf!yogfBRFC0<^A53dj}S{`TQez-B!AKhacZ(Urxlxw&#eGwZn#Vzo{Ylt-qc$xz#QG z-~L}qE8Zu9`+nvvnK5_nc0FO)c^xWWr`48s247#3m_A*889HI*&(bS%X`Ok!r5=Dt`>CAB-O-ZL^=j4V_Y3cT6p&6A z`5&<<6vO-yx(ELGvQ?eH{-36;i+`h2u466v<-I#7GCN|{z}Nx#Teog4efqxWaN+dK z&J^4~l9o0d02Fai&;7?_@`k*Kc^`L4wg!B1fBTBmGuYo90Uf-Heq?<}dFhv#EjK4O zy}CG|?b@#?%U6GMF6wBR(EZr&?}ogC-BYJITRi%=w`jO{=*GR^ux<8N zE5`y-^bamwlXD62sC++IwCntFb6WMhj55-UPyV^Cb@7t#Zj?bqqwtvh!6y4L=T6f7j$JW-kwIsf)`NrD8Q6TSr zk{ugRhNc0XUtey1dBrY!|L{QFwN;^gzp4_NA0S`dJaMMa7nlpNyzcT)Zeox7C(YV( zzqj-pJhZ>+Q&iXK1D5=^ziF*szAWtPUh+37wHa0acFoedg?~bpja(8h`_q-Mr1Wp! z58TziUmLo+uK(Q%tns+7lmf7D-GjSdUtalkCZTt9TS*V1ukG=+_FI2@Ck#xT-4(v- zc;WP6+m`g9f>vC++PLY`HEu(F{igRti;`BPEKu(Tj1*eE!*8gQGVo7T&bH>4_Qs0N z)&})o_nCkNmnGdT(lxYyZHRmAme4)ebLP-$f%k>%OP}-lAd7l`@;)!`)V@4|Kq80x zCA0^%?pQiKcG#!$WoP4_*%n>zK(Fk4t}IBX?$?{wubVGag_Q%MkNRs5Wgqe?%D(ia z*WQSgN=se&?|zeY9O*z?Lhr2BY+w?LBEsw+c-8MrU+=7yeZSrpBpmxU$-DkZ76e}( zB2G)%^LehQ^J~`t#$> zHr3zi8U4QRwSMLIZza)3O$NXXa>Q@I_+4a(1_x%gBebM!+k50B-?|GUTG%aF6{(%DrZkemX zF0{DKD(hO@ugrCY+DWZ$$v38k+P?yt`NubV%i@B~O-X}fZOHESq%KuL(SJsKa^1Et zhu@6hyLU(b{>fvV7GF98Ppe? 
zR?KVV7VmZbC=|A@IPbot-y!YW9~}1Ar|s&mDOEpSv3li>Ki_UY#V!%9(AZa(4EDYC zZS?95+QqjVZLP?&_O|8qegj_j)m>1!g{|xgeDNVLD{>ri>h-Ps*B|o^-Z&k1J+~J< zzF(qer>yL0WiIOZ$y-#lKJZsfUEz<Z{C}CXePF2z&T~rwAh#D zEt^J4jnh{Bi)4wk_pZKn?w*s~;D*LhWgGDeL@Tw7vv5|}WJ=OZTvbJnwP{s90 zE7H=FhNqsn-ZyG|VvlOltp_*a0>Zn?{rZ;nKm-1TI&o^cX?Z_mDc5o3KLee-*}%iXFa?j;`{tp4#fp*`@f-^!@xfb+TbvTt(hqvEvxSloIs zRGlFBC$Hh5_SH#UjjIpjq0fWo4Ep=~{p}N3IbFep6OUH3UqCMZNt}AV{9D)V<%dhm zBenNv!Hc$K{Fz(-))-KBJ#J3Esonj??b~bCtZ~QRn&}@Huo;)~)+l0nm_||AZ0_0} z=jZeJPqJ)3QB2i=QloB_&tlv714RifHFe~m+_mNRyeIIc3 zp&4h3R(8K04tmDPi=2*}(jQK1QraH(tsHS;w4a~f5W=kK0ePd~TOU`03)WpKobd2@ zaarS&;mZfU9x-Ao(SY~C&S-A(8$Ysb4cqa1X!*-I1A-2&HduZnmu#LBgo?lMw8r;q z9RI#IwJnBBaispVbLkIvI)&EMCRoIoyH+&eD_bqPnRa)&@tRBfXH>nfh=ckOS` z)z~qPOz5ll_Y8~GS~I7x;rgFJ^1FSs#-_fPp-&5DpIWnK!2ZY=(kRtB+3nOf)&Amo zzozm#fJZeNs-6|lT8P;vW=^ZpIWszyI1&2Kpv3Jv&|{ti+&;Xj!doF2XZSHHIl%xM zgq3;tc%DkI>Ah+OzEv(2}?RB#(7Psv4$0ydD`-U>%lRAh0I(t_8Ea+-y zC9Pm{3$gicSnr0|RaYn;<0mE^tU-5or<5+4IqV~P=$*<1cp`Hkv_?)ClvwT`-wkT*1w!mJ{k5L2q5U!V zwfbwimu&jXnK8Y&odD#zb*kpmmBttSfdO#VXVl8OK0641+ZR?GJuO}NJLJ~Q`|vTT z^sM57QB3A(5E!16nHe%<$dHVVK59z0$LED3=RhfYG@F}>$1l0L?xWw#sT+Z5-UM%3 z!n{J(d){Z}zs_-6a=7=ByMx4Ld*@kyzHa%HbgbL+i2Pm7*@8jqXT7~MZ!VF)60&Sj zZHW^o3nCftlhizNWnr7)U-Y0z_$DQ2gSVg^3OXw7C??GHeUkWO=p=yYImHEoQd5K6C+$95a%kj` zvhuqrepk0$xJ+t8Cp7w;ym9F7&%BOZ^%Lf#mRFUw97J~A3D|a$ro2^=7p00FSN=u_ z3b`A2ap7rc?5Fv}IYC+WDDPkEbE}^`dGoR4La&=N`^$(5+P0{xc;B2`pS%JWy-t98oo5#K7fqeLaX8R9GOkeuo`*HI1H7V`FT(3>471xiW(?r>$NQLm@Uh0 zHhu6(z8x&HdK1H*AsS4)sjuXCE&bDCLZ-hM!w^zGB{7AN0ssV_U;_NjCpTMJYdz(Z&nw z(D7xr9{VjHnAPsK6>GpR#O5@6c4PRzC{OuMUp>M#JLAJtpK^eMW5#Y(wb=)) zZEyefs%lBfkpXd25xsAoT$ec&* zgjy*YCl5cRYV{#Rba{uk=2SmH-ko*nBsqv?x_$IC$FTA17b~4!m38gh(9=QVK2$xv z;n+Bvrr8`0UY=c{SZ;`;eT{j}nfR6j7Tbo5&t6&JcDQ35Sr-Vv;P&sEkF0yCC@XzF zdgJ{uM-=a}0AAUc)3%4Blgp3a_CIsldWfsF3iAsK%*ne-7M%2b5%s2JU_3Ie*fqRt z!SpeJuOBgHGlm@FQeohg3HN(R6F z=r`(m6M0DTIqJIgggMh^1dXimN!*v7IaJ{adURftxNnzm-HXJA(GN$_7QAl$m~y;+ zBAp(I6;=&s?PUU9o7wl{(BJ-i!;#551cWn%Qu&Oj*{0Uqczbf(Dek?fvs7oHb!_mC z(0E_og=b*2d1!C>V?Y-$=G*R58c;JYYQuJf5-9p5XVPBm{k-R3N#e(i1L-y>m?K;J zZ9ZH#vk%Y2{cRGnrrGj!3$~PwjyROli+0aQ$$IV_vYr?aG|y|GKL>SGX0hil zJ{QLQ{Nd|pB`cxWG4#fRIU^4CWwnnRld^(5b5L2fhtK5A)j(XkIom(vU|=!5s7cZC zV&aQG(MR5UW}mz~5|{JXd&)lI3}{m4>Nx;%4URa}ka&7Wcl^T>;}rF%d~3AsNN44) z1&&X5=VZWZ&b-Bf<6bopax=`vx|=`YTP{r6Fm~(x!zJ;aV*>}PVtw)E9*`u2gqm@Q zM|;;2H1V#Su<1;cNi&5fJFsWk!rL(W>QK`x3-mFe@;r^K04_>iYxqhs(=b?uU=r7KtZSdDmYiR@WiO6utKS2-3{m zyG~aFzWDsiu2I67c4=jqmHuFPX>BbE0)<3rG^xBitKma+()Axz<-v~yH`g+YDbkVlvV7^!C0mKUf_p@K+j{3vx zA|QSSz(PFyIU_g76l?d2H#Bb=R*SR;Tt71ISMJ!kr(+2^v)>ka;6D##C_e#3Wge=y zB#%9G??P=@Z|%-SyP`$G`ErnVH4;Rux-n)LF-Yq81F15F`rY+CbS-(~-UZ$Vir1aK zW~_yG8#Ce_>0Pz7E~L2ZQN_S_8ATvSXbN=LY?O^ViIn=WtfF*qiW<1NuF7hg59N0jq#7LM!M^#ScNM0sa{tn{9EpNI< z>i)T-7eECuVuT5D<^7+KhRn_Hi()RaVX~a68-cR+>>?)z<+o zihITEflgoOXB;_C^gJYRd;QLkVb!&d)@D|WytBY>{r9vtDATgxQzXBu;YEoDHU}@e zn)tfKW5jS|f+*Bg`YAE=zexK2%g|6|+%iK)a>+h7 zuQaQnYkkQTzb`NUM732`Ja8NLwa?H#V%-HH=*6G1*OR7fyKx6UgfuGvHrtCcaxa;D zk3_c2UzynK;j?#;LIIB6dx=a9#uAs$GHudaI#N_{*=rMZ-`^j}KmkR6?ay}_P%7XZ zd{;O4RYFohLBW%q5)Ucn=CAtdpu#c31~YW4_`gnle7@k=X)Siuy#Z4s!>V6|Z7KQ+ zwsfA)IbMFj+hcM>kDd-9X^3|2UzxC{it-SVr(bTu?{(bN*jL6a( zcgDDe&91J8Cx#TYAPhl^u0VI(OP%HhAjg0S!rgTpUqX^A-W|@1Blj$g80mJRHo17u zn-V+uQ|UXx0EMzDI6{*v_cHvd^xCqGb@=$@iN=wXvvyOtbe8qYB=_4Ge+md6GkyB( z(f$-FQ+nHRwNS8Q4dH?XK zgmEE6K{fKi`g6Gf59=?#zPb|`RQ$XAF4X($YLE7Do5UHl?=|7xPzn2l@iOt|zZL6oz}* z6+P=FV6iu<>Q+ugE;`9w;D2_~-@dNv|0!1u928MhOTFV$@$)Ba_Uzf5NZ#gd{OtWN zuO2?^@>~MKw-3HPw{!G{H){?L88?d3eE6(qH$LWu`>vta>!c$#+{2Km9?|EIZ|9;m 
zl^>sT*zvPJDZt_yM$CD*=XH81`a|aal-JKkg9>eB=+Lg7jQV!(*{!_{po9ci)1lSB z=f$k+`T=`3|K5o5XV1F&a%~Nh{u0p8sB&^(Qt6uHcOwgIsfRDvrSneza$S(^6oSK6 zdC7RY!HW<{ryKiSd)7Ifd!TQCs!c)S4bRKpU-$OiYty?osX%^xr`u;(a$e2z(B%4~ zVYP@(V@pe8z>a$vFDjBlV!H3mxIrBvng6HhO@mKj&sl2g=!lC^U0+`obS;s-JvI$} zDK_70S9)ptow>8;5BYOzdq?{pE`4=b@z!%S!@_b9OG++{`?iQ!LxLySfINe6(mF ztl;MbvuU_!M}sYNM+1{!%B!h;x~--;zU?q%ZNcl$&6}tOUf3MpZ8R@ z#gCZpdn~4T#i5l;<6lkq>PDHoUQRo;eUGQh6%U5Px7J^F%>?sazxy~ieCvd%63w=> z%Hcq@d+@iaZH>3tjQK3{*L%SwYiPTJPA$^}1O)ic>n=~;F0snz)lGgClw$o-JW(^n z#d%g+RnN}Sjd4YunrBFFsbGxGBq=K8vj|>;i7Z90W!gf$r%HG?h>6nQbZmq zBOLc$Ah;z;8#U}ldUfXU4bAMy-3PpqPj*+NHKcGfkFf{70VhEPoYU7l?#@#ie*Cp063TEk-qm%qW@Z-R8r zKp<>yxhIZikBN&&8tY$Z;{G{C2}>?o7T~DUEbY1}?H-EH&OSO}mo@U3{sV>~3;y(O z);ZmXe!T=zr1-Ax*0Em0F7dpGa{$!;8<4ZIrUts{U8;XA((UYtsawwH9OHeMnprwv z)~y9JUUA^1lWouH(OWi2I(whlbBfNn!M83}1$CCHvc?lf?*dDX-S;m(`JojMa>eE+ zpyk=gsh;SWmuO`YmSc&d%+^4|V4^RM)tY6|eqHEi^OC9HrbRJ$aLi70( z7z*9|JDCXLJ%0W5?dY+m9#0i$E{r~|YEuI4si_4x&RBa7?R)UuGW&u^pT*hNO!=$2 zS}#^dW}fvwl2IDfwBZG&VD{#mUGvtSzWT;n{S&uJK5Wwh35WYU`TVDG{90Sq@zYb` zV;5gEOCp9<-)y9>_1fr5nsa4-S?kIdGysm5-~1U)UvV|@;A#QBFtwyz8x|gQZJGB1 zx32Fyc8zgJE#F5?iRj^oo^EO0BuEz&=3Kozc-K&l_dIr)YVCLc(9ZD=gh(k z3!6B|eZunVV<0Oo!DmVC z6=^PFe%RK|)quE%FvEVeOqfd>{T=qcXwt3ba<|5ay|t-ZwlqDte%-QSXP=~{_VSb; z0KJr&y{m>q-cqPeY$QZl!&0UfWo2EP2)DMz-ffk?EhrkZ@Wn)XnBT>OP1JDjY5U4Q z-1JE%Dr-VPYcl|0pMUHLAgugW%a!3rYpPDCgf23+T`sUcdJ5n2Zji4HB7XN0-yf|nEg2&0C~C?$COyI)8c0boQX>ryhQ9-79p3eFtN()ka~EJG z4iV{C%6?qEyr}kQuClo~m!{I_i5l1JMXUZ8zi%l`-X(EsTUy_<7pUN)o)_t+4oPTQ zWj?ce`O0Y%CQP`uyMFt2_dA=u0SBI5P498iH6tW@xhh5)R$FR^UwX%BQY5Pfmg;)zVzhI-I(aP zLXUFickhS1Vw}Nix7RJ-N@0cC+6VXB0XwJAF{91Pimb!N+qXeFQ!dsU7w*kMI%{t zgBOHn9i0$veffnDvFhRvV?#<^xR>Zx@qv<^A3hzzRyFk}B+-s9xO8EI7j)p6@Yo07w;nvk0`NNQMJ2>FV;fW*X-6GQiiCoD@F7I*8zMRz8%bZ1Z zjWqqP-RTW|u>_yRE2I7iJ-FoTuJj+SrQ5@14u5MDu>OygC)kAX{`^k=34}cM?vT8 z-)ncWqS_iu3KHhJfAt%P@&7RSKx9Vk!3#yDZ|@1KAJr#sIqB0qY)E-^Z8VOmTTNMf zL_V&ju?jfgxpB9#B>o$45chpi+r&A)n?L)~o}Gx~YA!D78530WRwv5$@@&E-Zg#Kd zx}CeU`vrC3;XuYTop?_5a^H==FPl7fUu2Iiny(4rh#S^_`hBn=w_FmpdtA=(`Rps{ zjGP3=u<|z-fIJIB>v0c#uomiOPd}a#di_M*ots_-wy{QvZV=QhaI(a^GVK1uhH~%9 zNUVPMrpEJc5qHzvwpi=3VgrwKzGHi7M(-IhroCZ1w@c-d&$F!isZ{hXU@dk{XAvL zlw+S?ynHFNus(kLSQ}qgS6AIga+^AQoKJu0dCtN2FZ18UUTy#L^ev?oz>5bH^!HY= zerx9esnLS5Sqr&Sbn29vjQRHz@*NEavo<^&9|23hoqhbg&#kwNx%1;NYWdX4ce5^@ z-Z$Tus7tvg4mqjZU*d=zh0B=)hOG8@+51MN_rtF;`}J4{t29%O9tZX30zm2IBRV|2 zwqn+#!}B7iCW%u(duDAejh&d@)Hv@F?a16Q?-!JH+&^fYyywKc!u^oXt*u+zw=b<9 zY6Bm1HOCbs09S}6tQLu$|5#W^Jx(%q)WZu3?b8>`aqGimfi5oE*83-WDpB*;RGAjZ z9y)U5#R%(_zM6=+4zU6@b8P$meUpX!gP+}v{4 z^W1V#xYs*cO5wTOU4>1tqmlxkAApn`M_V#qd*zr(%J`y-zc5-mFMlf|2+!CQHthVk zbGJI(XB8#wuZfrsml`kWXc@^@Z~n%Q7veN+>Cc^r=yY321yb6JT6iY)>>ZSMrWnWj ze1CDS0FNC89HQe$%1P(VKRnz$hmBq^)yLEGl{f5h(25M`^ z=VxjA_R-RI?@pob~ls{(XW`J@)&isT{T|!gTMQ_TJO;fOSq?dS4i>Qs&No z5i}67vGc2kz_a+cZ@&jS-GCA0#cgh+Gr({!Ni}Y|h~$9_aNQcY^hJj(GHU>J&hAloS2KW)JC| zoKk(rKdEnw`lpgCn~uu_yz@+C{5Q|__}hiSZwnt`oWXlve&9%E6uM2jUC@!XXUEzP z-7&{B2NSnsha72nGCknTsQhy3=p(kSYf0XIhn5QMZl)ij-94wQHNS72J~Ifnxpo+F z=9|RLely)pylE&|>7KEJw2FuPoif{4TC=yaFEY-NCJ77-{FDipSP&{ssZ7btoO1uc zgV#X0R;AY`7Z!FTe!hI=N`6;Ob@kGin3+JS+SvQsTKn{A+0&=CQmtUe>gjXlq};zx z|Jm8uoOft-*@q8vzyJ7=m7RUQNg@`DXU?1%6%{oAsef?;P+%|$wPDX5@1Ttb_wCE+ z+p^@9+p{w(QUK+B7N47!mviQf+mgS3phOZWEj@h*8hwe-6VBsJxOMARvE}3VircpV z4>6F#sgYAorKfwY?rB91xbftKu(Yb`y6SH)U=gFgmRDE1%jI%!U*B86?V=4|zv_?V z9vkFIj%svN2DYcP1g8uv)F2w{YP?5E7a4D&em$aJ!96r6vM9xq0gru*%w+ z8qSMiB9SxGOaHVn+Q` zGYPyr#+E};Z>$Y-Y@kfTvPEN>cW|XEI|m(k8}4rujm(C8ci*vnC=Ndi+x&vt&{(ta z>(}KiUY;{hut6D!Nz>t7U~ 
zYpO3QLVWt`Op^>tiud0GJXn`uFl$2^8yg>XWUgB`C?K?m zi~7LsJ_dsU+yfOE{yaSufpUIt*>wArLV1b1uPJUgV{5?X&Ij4tD{*aIceWT_@^@a_ z0~$Pb<0w@XTX9zhNDJ!ue8-6!8Ozp(&fFbXthSH(%H44VPWGI z8~?O%f=lqV8dj9@f?^(VNBjCjQggTB%mCZ!gaKE&Mo0NwpL;VVoh_22e7y$t%z!Ut zlU0L%w*58*=kMCN6BiljYh|cZbFB|6D>sESK6{q(<9${6-MbrUo~>W>@8_ARfdj^j z9qS(yq+i?g>B9&18F6aus8OS$SFYS2B3!A{O8`OUksTwYkf%L zlPBxXo&~L2zg}uG?FjkW(tiSgP+NO$|Ni}6+pa%&Xv#&v zZ;l-9_UG*36>Dc5vEA+r25ur>{5X3R(hM$89zVPow?6P<<)X^gmjT767U2yIkG!fg zPOZ=QKE0OTGCg2-VW>#4e5`5Ir|l7Z7Qwi=F1H-=<9KoQg>Ul+KHPn&f}`VIk7BBK z+afeQkB(ID4sxIc0`P6pEZ+I-$R#=5Il|3uzC zw>Q`K{wW_1-zvO_KZyBhFRr6_+)b}kEc-RedsE)I6=J{maK^cam*cO%TF*u_j6FYl zIZFC=>;W@#JLl1jfl{@PdqxFDs&3y%PVzU67z56z{=V5vJ5`W9tfjW#@Dbw_4NyZz z0d-W@#A?L(3JNd&nD-UTvRA(Gh5OrcH_a-#w=b%}-}lS*MX6`-;ol0^EWGx1_s!M* z^L*TQtXWgMkb7YhP&Z!JQZT+{(z8cl>Y7s1aliERbar$_6bOVujTq3~@odGViY>ReEL zi2is2MEPB`CX^fm2B(EwORqI`zCKV>T3;{uv_Cg@miHL_NS#`pH8Xmy_%|4v!Hg+uMcI2{U$KDJYH0W4(;5r=>Y+_ zRdEl=NafAOhYwRj9!@qc|JvQ~WuZ{hCOBc^9U_4lt%iFx~MM&8+R zmk49;1T`_y8yG>w2Uff9+`6Oq@dxHEBI`KJ#mlYPH+y{}Y~IgYN_H?KvG>*b#D+uu zTYa9bDsZdZb?3{X<;$1fTRnEIA((j}#QMy5QmY!M4V@#Z4+)qbG}+(7h|<0p^yKl; z?avTNH^Dqm>Yk=G72+?Uip_0+G+)khTY2Kmwlf{&XB!p`#EwI6|8b|>qixatpZC^E z$zczP6H9}NyqU&ZhONQLv}OK2Ip3SS54fcikFTrP^?Df{y`}7)Vg501&GKK{4V7in zHsfPMCnW0Tl$>Kcp4{*x_AKtM&x*SrrFmNrwtGIe!#)O}rsuCq%1$|)ot1Q?U=ZwL zR{-?P__u|=WcuCQRgbaUY{0VNpS*wuV%bGqUFW>B8UROtzn zp*L+BWz}0O=^@vA@&N(oroOtgheD%`4hsuw|MqQda&kptkJtF|iGYUv`SS-95<)Fu zQK_S>54hZY+qWZSD2%)7 z3Ivbf9^Bn!L2zHe-QC?KxVvj`cefw`vLLtxC%7ehxSsod-gp0m%?CJ+VWy|MtgEZ) zta){V)u975x~-J&-y;GhN3YkT__cP^u{``6IvN-`77KT)N zTvb(7;SAWRMMvQB5zrpu?teCx)daFQfB#2i#wDT@=qotlC1pHI$?6RT5@S>7_uP`W zTZKXXu2=_V%V1SC~zs4uxw(K4pq%0^?IY0OvE*W*ASki~~+Viqwl z7!OESjK-1ax_r~CrlX?^?B7h)nOU3l^!xi@E+?^1EWjjf#amSPQ7L=#_d#oGt27`U z%_~PUBhib@pSga|q-IS%4o6-opSm+oY(M`@rD^Wz=~+Csv9ZZ7DiZf4!@&{JcYnTj zJi5;Tj~mq!9X~wcJ1PA>c_3q)^)8W zWq{i?v$p*T)@P;3fR_?@Ldom$g-ldBl~3NGkqCE9^8MWmXXuZrLX}OHpp^?F`fx6~ z8iMiPrh0V9ms8Y3ABflg^6d)nWq=P&i!ybR7fCX6gL4xR6=wBCjI=O(#7&HL z-k_Z}o+a>_#ttGWC9u1MwiSE~iSg-YMeFk`GCJYF8oc{3w_=+ba6Ze{Fv%7AU z$%gYRMU8=yvhV%J{r!C-);8k!m)lkQd!TYXZFeez!-(~asag2%Jq^s}L7snU+$)d& zx%i)~G0tO!Uq1!nYv5BbL=Bu(yUT3fWJR!!e<#xauOCep6Yy(InNi6qz$H`t+BT_O`X4 zEMnTxZZbPLwnF@?@FewpC`Ctcae~fDz-vYaRBvUnRnPYzOfj-P_sdb!Bx54$YI0y>jeXN*+q6;V|+|dKrW^*gn zM6unaY%36wV8PP4`}KEd8>G@k@y4KA5(G${AI$CV$L(;A+!cQ);Zrl2<7OKsmR0t! 
zs4E18j4f>7jdeDsdzilq$+zgrL|wh$sb{p1(bC|I&rv4>Bm~Kyz+h_=JISYFN#rhd z;Y~e@y`3F+z-c!XJjkm!M=D!%V(v3xZ7Yd!o?PCOv6YfSa`h4NcO?f*dRtpt-|#Tv z(a{kO0YR~x%gf_∨=TQSDy!%S80z{_d{IsAnXOGjj|F*Zz9u!|wRk#r8cJCi8y= zT^_jl`uZ>dxw+)E);y8`49HCsl~q3uPo=rJ^=j(;eY0krfxd%qHu{-{u9YVHUoQU1 z6lmV#ax&x0^v%-oc|jA25Ebx3NMR;XHFFz~;U`cM zGUhqQMdDai0r7>37;NC0e5|JQn;^n7e@j(*jG-#18FAPysx?a%DSV3HXX|}o{WI`i+o$_avq8$!2yoIr~C7N&-H6T_BA0;#4(gP;hX?&J}N#v$Hb}QBQYd zY(xYK9v)soQc_TH@jELjAm4R+cPB0(5#pt2X7<4~&bi@-Q}NQm!dE^a3c{T5EAKo3 zPrTO+ei?uk=q;zFrh*SrlU}1rM{Rj|6t3Bg6EMIQ9~z1X27`fN#r9^MH1sWOY>J(P z9v&W44GffWbZZx#?2_0vf`WpG^hSS{#l^)fIsz!e+E5eVFuvFhgmCim@~YgGw6Xg2C7jY`2KOYL zJnM>8NSVU!Rg{@4Vojo7m3FIvcd>n}p0a5~zEDsU?lcnbP$5aeJPCpC>$~qcjDdEg zJfv*b@1hIC$Ek;oC_xyH)Xh~?t$}}gnl!m7e_)dfoPB-)4prB+V1}|{Bsy)x2$?Yv z89ky7DI%b@Y#5Q4LW|d9VB#k>k&+*)>&4(3YZeO-znJ7K7Mi&YWgR4Zkgp}-M?>fo z9qC&>Q+RDM-Lc;*Vyeqnl}NWdcKc}hq!iiC*uh1HjB7#LTlkqFePdrdkVXbLC`l48 ze_xaZ}K8}%+`~@3{1Weue+;9A^;j%lavg1ZY{jMbpxWzee3Jw)6>(7 zm)F-WN00pS8kiUulze=d48*s~hW??2h155hdoncu?|}$Ug>yx?3Y-iRQc^;JVur|LAz;T&;|Eln!Er{KTwC3cLJvqT z7*UR^g^{HQev@*Vgb*Ui*=Yx5?bSK)4^vja_!U$vE*G%!l(JStS67feVpJEH zI*^wsp5&Wv!O$=er~ho*$?oQ!;Rh;!yjiQ?KIdDz!9tOiSeu=~Y8qMI3I=T+;Um@c zSs;YGUstn2pCGk7SL@gWHa(hXAegr32!L!kxwx45Ea60aeFZf+Q`M|^hao+`I$W(q zUzYdmdW`Jsl=k-5fBHRNO4sV?&KzF&JiXj)wC~0_n+lqn20d`~J`A)w9}q>X`8@5V z8MbL$ae!hF{|IdGJXy2@*(!&$I)!#{j8r_Sd#KzOdqsq8>o&dS!thR8P_kcV2Y z2B5-dlFUDgjJjE%kajTKtdyOP{m1vj3Z)XV>ZsjXi-I@uPRBbXI20!mB~)#XBWiRg zxQ^`+G3JtXG!V5p9H!7zEyt#j$RZ}ux2LGvhcX1x(n&mgUx(&W7~^W+vAMjDEacZF z(3c%x1+O+1AiuA3l6NDZ`QBf_@j*boCMb%Jud_o{RXcX&UV^8sP4u{l91A648`)3w znUXT>1ult)mJuG-(KBMKT;w`|KEza=%1ed}?+;0WDqkE>MN{CRj$Ao<F%Tc1$A>vOUvS?-seB)%Xt}qli$_dJ-49tA z{bz5|!qPG@ICwA!ESEh{T2T=>HYRg@bMqQ`D6gr}S6Pqh*ZRrTE+;29Ha$Ju*9Y_J z$;QOQP%n%;Q=(323(w}sj2+h+L=$w&di)>n3%8VY9Koj+0eii z5gDm^0H}+qib}xNmhoDbCq4js9vd6ea`84m+PS^0)vVR)2NcE0$%&GgS;==JMTY4B zuA#oZzSF?k#wO;^AE%i3_?=M-e+gw}Y#?aovHrn57YO$hb#{`zx?fUKQVAIusM^{; zKk3u9yt=|wf{fLLDtHAkZ5|pST$BQ2{fR*NADxt* z6BXRRf>sb|R$vD$9p`8S6LU+XO@pbtt!5I6mG}cCO$VS<8ho^#iL0@1Ykr~ZWYs^? 
zR+1lRTa{ol1vXv4lk`Z^5fW8xYHBGcZNWm7RcLGT z6qTMu!7xUJmT>WQ!xR>pG~FEx!X)az8SepvgY^t#KO9prD;i*Ge5jpfWel>HLHk_CCBRIqdg&P<%P1n2fOs?zs!Bl>GeT?3DH)y58}`1X#<38fpQLve zEfim34vzuj3UZvM+&TY$r$+)zFE~CPyHZ;D&%0*fw*(^kzBE=xM~Ug#aGKcm*=(#x z;u1-26*m`{o|{hRT=WuFs39rh#cqh{R;UFuil}72uV@>!;&%TKj0+AF?S4aO<mh zOX{b??7qXc$rNpW+x{zJ4jRkZgIa|QVE7nkSAh)oQ7*XiJRipC3Qa|KIpCcc{hEMp zzO@A!tSX%j5KQcRTk*ydTY+pcHV%kS7nhV!F)=CnB~s#~q9#HKDH+GYzp2Q&6f-zQ zFokc{cC7s&>bI5AkcjDJGQmUN+$0PRNj4dJM@0FK4;`pWxVS}?_*@TTC5E6%{6Qwt z7Lv@GS)^^UO2QD-!h@@hbh(qT^2max`#WDRZVj*EeKoO*3*st`%el zPj%xvikTJ3>om>3Ac?YBLW{XRmA!pW7;98&E*>Dm1ORb20X9=jgx}9)GKR--B63AV z%be*`w+S3(??KzqJk?&he+Cz zQipyA(S?*+7;{Ra7rmE4u$+0KV!qy#u;S(;we`W*z$O!fgh-DLLT3;&b@<#Rm;j!IC>#fkgkp3P4A%B`8zk^ zV5i2$IUw13lb{Bcb`iiYz-@S~h^~}wPEFQf$U?WUvHGuEr~Ado!UWbB^`~nJbY)i$ zLF%38=N-NREB=Tm7$o{&P&t-aq%E1QO;r$xRi&gX zdsz;7&&<3i&VGQtj|GJkSrdpXQPanvG6`}-&{>Iz+|o*5(>cr51pUF~Y;B@uqCsEx zgJorF4QY#c&Xlh)v=BI13!*A2jJ5a22`I?G%v^m^-B=5b>R^_UpZvZN4-|_yU`H&j ze*|ELfFBSUmpphVlM^oPKsTOppy;HTTC3;Tz*1sH3{wkU{@|53FQ>X!$GjNaQsbe07wLF$nO-PMA@RXFZYx!m!RG{+4f4+@QS_CG87c4^0hH-E%_ILB z-It9OoBct36eRGt4d^$Y4f)gcSE2$yS|Lns)Ga}QWLk#=!^{Y(>gp0~A_RUWCdbys z#9R|+8x+o8`vWjravCqY>yDkf>xMksS`Gxb!y6lge32`f7H4#i!YiV=jE1sZ4~2|T zGw<@^i0pC?S&i^MFroHcMz`V;`eU0zYN_a%VLR`_oqAqSs3dUJtv=^B2E&`hxeaB+ z3Oe$y5TYXMIWSx+_owJOefGsV`|k4oPOhB)_6&+@XiOO-K5Md?yR+hdfpL4UC^mHD z7NNIFZocH+*N)i|0bM~Q=314FU$MKgD{)npvmHG+;2v-P8!T;dR+y50v=iOXm{`4P#npi1P>Q+qIf@-USt%Z61cM@(_3;;;L?gJ5BcZW?Ctz zLTH2JYND93*4}8|VTug=6_VjOjM5F)`$wSH+AGPe-#=ZvOFq4hK5a)%7WMsCRf87< z2ToiGVs6B9OA#|vd((*9^X-#Tja~SH_3F+P-%k^7*GWjR4GFo`rgNE zyor`woq_1|IV$K9$7mnl8rM2d5ZSDROOoE1Y=sG?_xf?~Kb!{5hKVq)coCqxNgCbO za-_HHydAL>j1MH8DLVd!Z*kg9F3dyefq`}RMdkw>-zlPHMT`@z;o0L@rB{h|A zYac?|X-uAI5l+wsYx^8d9LToh!)xd2_6ri&G-0FVn|BBwd_*s5SWvni5c%ug8(;ksGE%&@ zW)!%2%YiN2x6(%n0^x+`TF>;kIf8V&k&A9(^eu#a`8%e6!q@fjR15l%>dvkWjuc(4 zxZLC2X5Uj_RG-8id+R$M=V#rI*>*T( zNscStQ8zNm*;P6_?%dvu&e*Otm}-LH#Hkw`b!l5{e}xsv>ulbC>mB9-I}K^9;F^b$ zv9)2R=+HMyq;$BP#=TibXZ2kNdw4L7T)c^9Gny~~dZxjZMNWRVUXsGN6^xn7nuQ#e z(zey`hm%{+_8Sjdy68G%*=6mp+AO=qV0J{yhLf8}t>8~#`GA zq+>B=qvOPA09Z9V$B~Bn#+Pu*++=I1mEvF(aI^?SOEjkW_*Wnj|7IfAHbeN4AGgd@ z@}{C~rC2y8%h$}Du%IuW-!K5Tw3Yzj~!%)8hFwmxB`+P@7BI>f=uRYh~ZSdav{h6>{UnYJMf4Sw20 zTZ60ZGj$g5vX2Q~c$7cfcZb#2$P4}zC9}dDDRN+n>!3>!I>*1m=6GkS@aszWp1e2s zL9`*gkp?f?J+J@h#eB>ELconIa-{zvn#Yj~D{)TKk@keM!}WON;g%)@M^dHYNB+;9 zx46I0gLM536h8cG3W<@c4JxB6rfZ*j3Nzk=GNE8Z4s`?sqpbMBARln|Z%Ix&xI|*c zvDk-xWzdz6w%yb_TX&)Q2-v|g^jZ5PHg9ia4-ci$YF>0!(=hH4#au-9_>4ntw+~+N znSr%5X88->Gb}hfKBS)W#MB>JYY{tP?JSXnUCj?3Hqyh@0_Z1CeFCqV=&%K-g^MnJ zXl+%|X4{cKM>Su(ops@66Bk=gvtzKRn+nI;k z0SthDYuXL&2MGW!dA)U)3~#{;LEIJ|H+ zN!-D}2gWWsn9XS1uZ;zLfOf-3LCY6LZ7H(!2?E&Te;urA0+}A zU+}d2h*7UQ{M+A0MT{z0jt za}oFJBYuRqnc%sC1Cc4cCbY65NW-Ul+WiOHQP#Q?3l1X^C`NAR(wXyjvlW3RvUVI> zX(rLmIr%xe6DJo?E1Vm7)fSH>Q+8>sbe=Zp=Kg7XpffzH&0nGzgC%IXa_7+p7tf!cgl1adw)HLK-&+kGEzz|ygoFFH-Nhm#E~xr|Qt`E5@I zn0m?uB}qHQUyrui)@rrV;XGkWnOuDvU*Q)IptNkT4WdEMqOk$wnQ{9st*u*LGE_-3 zZ$m;Ot@an2-S(nONlUA>zsu=Xj8dE+t7jND^{gahv@aUlD5g2 zH2n7EL;eelvI~e4kqlW;e6*j#h}(szBPJWZj7{iprvb zlkuYo)js1!gk%*rWVpjt>$sPcwDojUgSUk1ddRcw=5+SqmO2E-RHb4sk`0~=x1|O7 zEbLlZEWv8hR?I>ep*ka)Y*LEK=!eZot`6~&*`j!YJABBLEu)}`w~+zIOsn{R3tGw zhl`_RZZm^M=X`JDe5Ly>H$AT397XySm708S%K&Ztj%`poFrKeQOc!#eGh@*UeVAM_ z9;f%euU$p!Rs17wdw6-3zR8>MbSi;DB?l-P!)-dp46HevUSj<$Vyl>5qqf~bZsB+0 zaPIiw$9D1hA4N!R3YDS17LKSL%mfjp?IIF9V{tq3>EKFLD@wrfoCUhr<}sihs@6p; zQeL8i8ENC?ToFct{pberug1$I2YK#<$8>(04R*dp=euagWxeOLOO59$Z5Sk>u!A4_ ze5by|T&D+Ks_h%iE9}YD+^tDw+f#_4jAPL^3oI68!SBa758ZUvZqu|PXFIFd4DWH7 
zZBR1mZ-q}os!Xb+F;BAfNWDw7X&f{J5oYZof`wwY1hZFLN2kZXf>-xS54dp~&!PD6 zVKFkAR!`XEQCF&SE8XtGsnuGc4diM@8_j7@`O~v})~QSFi3c~Tkc1O}Dv(zf#ncab z=-h4a)Tq^_EB&PI7+G(t(+QKygHyDx&1!|Kp|ov9sddD!?hz$-0cQb8?LTr%H1I#R zVW!eGck4gx_*^5N{A{I=*Q_4!n5}R)T-U7upmez^F`!RzPp6_NRJYU$E(?wNAllMm}TiWQIrHlRRZ?;X2=i=pe2LrGp)82am~TWOJJj)O_ok2+yWS&~6woUVm~;qEA+wH} zmARsWbhFiG-VGz4g`zA$xHOl+*cW(5?_LJHZ*3S_YiHzuAQ82AsgEw9{%H77#OLO* zH1u2$%iGHWEW2E1&u}V(Gy5t@wj}i;GN+q{?STLbMOFZ5|uyJ!K%$MdD}cqK50?3bfL-crOjKM1J~P9=ZPO-m%B2p)k~O=i1;u5{Ll@ zw@wSi3Q3c3^wo}x4vL{GY!yVn zh+>+fO4;$hMn;gBvKDu`jKCEDW5k-`HBz7#g4U|FHg9IQNj6+vn?{EU;@Y}K5wWo_ zrGRr2d&#X-1p2Kv_t05|YrVA-nN6+=u|EH*qmW(Iw-QIM-h`_lfH*i}D04zOpRE!> z$OpA87HjDg2mXKb^T=;6u8yK6JAXOci3qRpZ3cw|Z#&z?*H}b%b5u8&>*zC*{lvk- zT1$nN0(_nxkGpdEpTU%LG=agw#fbJTmTSolNR_irv`-?Q3W6QE+bHm`cy_A&%}=tl zv7gM)SZ8xqtK^z&SBWIiiMPA7{%2`v8mUlw?3p6D5;S$26qEQ%@e|ypu8(KrZzEQb zAE_*q)DS$mhpPLhc}X=_TWnQJz=+Qt1k;`kfQX||g;=oEFg7Qd=#CA%Rm|kv@}aTH zQiVTp@Ux}}d1@Iz6_&>MJw45W983C1%ve5J&(H!m3IW{VEo&~(C@h0`Co^lqadB~F z+bh`4H+Bk+V166yGst9VV$x|6sYn{0n{0qTFim19|36O^-INeTI!3ItVkwd?N+u4Z zmaA9bWpY{uY1HWSFQrl}8%$5z*xJrjerDjboJSyJ(2xLbvVi8>)pk$7rs%xNg`i8} zqMV*^Gx9q5S-th@TT?kgX>v7u))AEOKk1qI3KiQ978`JlAF-xW?5joos0Cm}BNWuLcIjP(~gfS$F$?n}&=* zs>_fezP?(ZHw7;1UF9lmtd$y-Az_%LmA*Z!QotB9Y_Gq!VZ6P)0US<|Pk|V#5yn3f zP+tlgh6x5fXwYV)K8Xzlb&JjLgI}$Z(e^B+_^l(Pm6;*3y!UkDB@psKF=Tm+Vns5Mq_E@|<7x-HkQA(14)~gNO^hD<_HU0X;y_{ zk?}bg7%W)nG=4xMAt~gAXiW#7%0zsq{(>dAv=)a{5!Ct$$HZrzC1Bn01XAaj? zC_5rreo2o|4p>Bn-Wnv1?(3WUiF?_jz;>Y&6XW`tH5_?05J%|J48tp?z;?Ao-HPx> zwJ6zdbo7(ePiYT&y}7sZvndQ}NnbC?e)iz)gz-S~9E`&Z8;6(qe?`9oN_|yJOn4b{ z*^qj3hiIR#x&+tyL4)-@zX<>4CYz_R+C^}TU^+_8|Ki9NE`=(Qm9)4vni(lPqp8=r ztA|{qa6`VEW7uYLFDn$-{vz2^P~hp^2)W2eiA06ASc;cu;8w`nQlUFXy%)^7xp2o{lnyxN3d}AL)4SaLjpqeO*j(P`GFy@D0(;c367Fh6LT=%adHY;YYgW ziqZ5x0MxO=6&`}|nIT1U7341%8R&4yX<4Da|B0NX_U+EI;}5e+#^#!%AL5_rn?h)7 zDg$A4maEEIz}Ch#3T1C`3MpAeRK`})foViqt7pzz4SUtg5yEPhH*-U*E{G|R*c7BEfrgMvK+pXcKdA6TSz zyqZ<|7%AwrQp5l$V1Lhf`WqSGe=r-;EUqt`aXGqN3T>2$BqN)div6<}UNFnuYzhAj z6}!yX!%pS2`e!TM7K^|_B&6)~7?e4UP)v;P&=IM<&=#Kt6VOQTlXL{vn+ecF8m(vv z7YE=-@D{QFnOU}#c64BJKk>tZ*uutXp2i7A0HeN>=Ty=U60g9MoGbRT1(A>hu!WU0 zj=)Avq(Pq_{uF+>q@G`;`&>xtC)|zlj^6!I^a#x0MIOENlHCUicAcgdG0DD;s07NM zGZ*V(0k25bf4z)9QN(1Vgv-CDi?{ZFE4rcZ9pMtf1Byu_7zU*bTxN&*0pQjk|Q8(Tboe(b@|5+jmd02ErlE*=RFcj(Rp^2EhwbA7clGDZcFxCU4D~1il>hth2@UW^GyP^|za@a7H%48_RX z?)g_=T)MrJn1yV8*+7b6YjZnCcn7Zd{ONNt^cy1D_vir>Rall0b+69|&NMUz`r&Rx z4QvL(Lo&<1lkP%*CPI8JF;<0?w zjn{)Nqh7mMT3S~ZKk-k?cyBEdM=*Xk$63@UroYe#Hsf5s)qx%aIh=2JpOF6W8#(D* z`_ru2zBdp@)KHSQov!>m2bP{G`jJ8-Q-Qct8~e4}%QzTM`nZiRC>grtcIr^<>)oEW za6=PV6MBfNrkMrc~}Y`--eS-^g0A}zL~$x&v#}tBc=OC zB`k~t9UY3@cNnn|2eUC8$8klanLq8G#p1|aQIT`^ZL_f;SeX0gd$tw=dW3>p@-w>) zEEo^R+v=(15f-h0`%rY=jU_$n>auXYPgW`wDIZ-cezm)igCG|HWms&bp5M-(IjYNIAyoq7z=?sNJ zd%~*#BAwQ$A=3d&ceSZrn>~KSE&dVHucFK%eKil%Jt4-%dfqLyrkPE_z;wmNE8n%R zW^rvfT9i-W4%!Y!ZJouDy)M|#?>VAuG=4{9XJmeM)YB6dCO0&E$LMO`;|*D?^I-T+ zauz8JB`+N+X>3ff;eDv}{{Pkj{G;Uo!8rKOl84&;ONfp2@V_`FZb@T|lt#wd1P15X z+C_lz@V_d(w&vcO{P@cdVW}3A3bLZs&aUKg9=D_4F}AjB&7eU!Gb#UgzWI1_c|C;s z>~V)>XxP=4!=zhZu_)1FidCrh&5}3Z&X6q6P^u|VbnCIsUQL`BPogK%qk+7=+jAfrU;-dKY^v!>Y znVbLVqai{t_9j!|AQ-q$OC=03@@8?s70e@zQ9#Q@wl|2s?6H07Z$&;x1o;=K+nX`P?MsnKY#nxQJ;37G55=Mvi{S zD9OPEV949r@dSOTD+{kAr2pb=r?XrW-66oArZJY@WFzONDzLR1QX?5P_5y`*cJ}f8 z`xfS6>(&tJ9Gq~nJ6;SevLMzLY~3R0PdTPy0~ zDb6?GVU&1y>0F+GO1j4v(plv^qIiB*9H5$XF!JnI$$PaikXtGfL$`hBzXxPMfBFaE zVG^Dn8mD}p9G6a$5~$D5*RuP>L2K2i1yl%2OX^k&a{8}l8}R!mIYWh;hxMN>p_h{{&a|4P??8}x zdVM&qSj2f^52jo;DX&hA68QkO%PdOudiqcw&#Nz0i*j-2zAD9w)NEMR 
z2g<3r-qs6Rlw^?cZOMzRB}+;+@7EK>SRM-tV6(n|YqK1*D~-%krKU3XevF*09dD32bO?cr-)(B?nbF)bl1iWr+tQYkeP97H zIxo0nm@7D#Q=@B|yOVY>4G+`R;UN0BU8Vc?FU-&$^Nj#}K*-IpBVg=K7-8|cv~__y z8})d7{GT))ISjLfSvY25rKA9NiwI`(&+`Kgd)TiO`mMXW^Lbx1n0JCc5sIEyVq;(A zPphJdagFS-{t0+qT;Dswe3MRATh*ur3B7v<15?Ya`{CvO_urRGTAuW)19U^+^s$Fd zTnf(8fCBYu&it$+DxF@Yl&XHlKE8r);l16Z9=??rr3zD!EY`%4S?^D*os|^ns`e_m z?oMVQFMX=o)Qj+MkvolM82v7zf?o28rWuw0FF4B4EzmUe92`e}A)n`wDaW z@SL4O5vlNrSCS2TVPH_j3S8c@%;dns2%w^W^~>8OwwH}_IiY>gJMv6@B>s``L9v)( z!^Z3bN1X4ED*1zJfLkSq+GC)z^Iy@8XJ^^M%J(LO5Y~z9S+gA;-*@X5LoE^abWl4;OHdIp^!Hz}7*1ig;S^dTTBC;(cb z3>mw4)|?6=<8`lc7Q`}s3oiNlS8{iE+@-bi=aj-D`Yv##xHlm5?y%tEwCwMm>iUX% z3QPRmYu^H)wT}0Kr<5FkV-lfeXNSZ7Q;lf%y{k4hE4TE-Y~Hq~^G?{5ZE3D~(v@du zjIr+J_I&(#g`W1~CC%xoynluV0UmXu|B8`O0AQfx?=&TmO-sY+`nD2(XPt{nl&>p!+H zUY%$i%*@AASgfdY{7ceH+;Zvl!+H6OxjDNxI)5@Lp2<0W;S#zw70RYwqn9DaMCFs3 z1J-R-B1369lu>cByuy7)rPj0fXA6*JQ#~u^9o*s$B&7P8ZA5xYZ9=*rQ2|h=z=3ou z9A!_(7|@7>YH@iKNn9LZxej1}9Sn@!!;|9cr98C=KH@C3p@YG!0iksw(>>awYa`xB-Ch>f3Cpom^)B4cxxX*M}8xM>K zKsUhK)oMhlgFqZDp`0jh#Eoin<_+23A8h#hUc$j5q5~%6;OFF_h0R1B1*xLK#r+uW z$l2mM9n`EbNZofnU+mEP>Psmj~Xb((b;CryTwa;?ki%S8}f4c zgW~%}clK}3moUXrZv<8W#Ss)lf*J6>P4-htT%EPZ zm1Pc2QGM6jS0XrlFp>1WO~5{P%9hM-RfBICAQbiQIU1P0yXCruP7_I#o14p{ZAqAI zxKn#8dSCd^5KPkf>Be*Wjn^IQ{$?-QDIG{p)@^NXt;X&-=j zcW!3{#5yB5pUh&e-GYyP1Sha%ySfwmi@Y14A|=i6bZgV+7vT5A6BhOxszN9JayI+5 zz8+)r`Dzf^h6#)?o+k_`ibOq_#qY_O1~Ia?8vqf0F%L?BrylEF=|jOeG4#JDfQ2S| z7J!ze6Lw>nY%8W?r4t0+h+q0V0GbNK!?ebO8}O%P1!uo?Ji?8W8;%%N*C_+R8lg$0 z3MQj32o$emQzQn+R;mDeodqSStD%Isx%~zu2^&Id^rd+5nRBnelebCHTx2RAxY>FJ<}A;u)OmjQ}RlIgI$*e2t5ABoF)t4IBb&T zJTar1nb)4sBh->cA9`9MXAFK!zgxzXJ@||NlQNb{`Ae_WT-8%zeGBR~2{<#p`JQ zy@dO9H27C?#=YM58VmG$jVjK*-nQ}o*_ZA>06-cDh}r#(DfkF+pbRPVV!ym{VT5Q6 zbM__x5oihL|486N-~0j+XF^BccO zV6)QI%I`%Y2EPXCu>K<$c8S|N2(6`ASOxxvSM<5lN8Zpv%*cp{zGTN8m5C{gBF_2* ztB%Fm1JMS`5QhJlS1VuhHH9#&qiwT47}<)6lpyMZ^ZB}98$W-MX1kM>zRV9^VPf_$ zp_l^kxk6fVyEwSg(sI2no|QZ{+yPC`*ccV)6b9FDRR7!<3I_<7hEwR&hl&p3X(7od zqorb5D<)zBw6qK%V>rENq92uD{&+S{dUcdxbi#p){$Su!UXiaoO@Z$T>DeoxNf!73 zTJL_oJ{*cp%xb;Fs9vQ48!MkOzymDukgHbCwkimbEf|gIJ@J>QaAcjV3`@z1Q%g(K zs_kLN!|znJ+@zs}^FTL}zEV}1l~npyrO+RitZ?Pv7;HktU+jP{Qn@FX#ijXPCjx7b z2K6E$(rW&Kx||gyrfN;ClpA82E1>lnas5^F8{8SfM!?0bd8`yIwgq9FnyQZ}D``<{ zcUoyNMPJT}5Sx?1!^fwp{|4?@dCedkaOzv>re{Hb)PU75V^vxo1rZS#9U{iDPVnl` z+FMvsE{ASUT7ua9hpU77sjuOHbazO6EpcOmlxuw})^S%SK+97r@Ni_@Q>}2F&LX}t zpV{?lRH4n@mCyq55iz`Kf_}j`_;><@)Yew+`cbmrg4eFwtAIjT?o?;Q=!;R`&4&UN z9>oPdSJvFu(M7+Ov_KN6QLwQ!kFlb}ww&Y0_>oqrB|TKF$o}m1O9>0ta-)V0SOV=i z0;SMqWM4He`-8k~I~<||0M~Cm%cms}Islr)lpER2M1!+7RzMWOe!E^@fk~A)Ww58e zlJ~T19Bgf871e6LQR#Xh0nEvMy-QS-u!rY;=hZ?m8)xO`=;LY8f(vT34@wfmzP_*G z%CDFLy@@G6>z0WT6aA6&z9Dx9yc)|^t&-9%{22iDM} ztiqFLRp|=bjE~q{-%Yz{31Iq8S6fx30^MI;EeT*EN>n&Wg_3nd1L06GG55UGRA{?o z0Mzjx5>hol#a93K252TC`u6qj1wDH-v`}uY^lr({x!e2EA~s+)lY)9Y{mWybULjRO1H~cl-Zt2pK<~; zm|wKbg$hDx7pfuY%hWbKX>c1^zYmIm+tpDXnN0CNJhdAW4>^09ilGhUMLS$7nhD2> zsVV{npb);PX3@ZlW&o;7cjzyKh#CJZ)tsDyR_@8#O`b@Xj>|I8|B+gttWaK6 z)TLliE-4u!M1aV6%N6%APgxYWn_Zokg+HXs=%j?;#3LbSrB&U9t`b<6m`eaxGIW7O z;vzy(bQ01_g)*%rh`5OT68jj=u}p~{R|aN1s6;V0m@kwYH)!f!^ix7|6I2k*Dy;X0C^ZB}}+=#qvW)DtV3gRWD(BfX>yu0gCRD4tzO5rPBYbN(1)sjNe!n zz+^FAM=A8DdQ)1@g-bGDJZq(YTs`?MxZ6G)ZMpehxq|1Q9 z`PURh$PfmkzP12FDcZ*R)?j$a1_eHT(V|u=;1a&9uRWeQLBUpP!tt%Yh%H^#O+Ov* zg_ZSj!4L5J`_Ju8>J{^j{R4zR0{Nn5F;{`0|L+&H6XnuA(OeYm9Ky}=0A{>0_N7ng zRrZ&rs40qO=vG#Nfq`hh$eGG{qg#@7WPR`@NOG*TryFa_z~7nB$s4GB_T~&#EU^9v z17>y()`zya_n%9D7KOy2BcCMWpSoPM!;*mYik!r{c(`ni#EU@xVXm7XXqTsRHh{GJ zsGKXIr3C^I07igy8v#uu(-nO`?e@Hq@*>R>@&#KlHsxDMS13MQaSHuWFP?iy`MH+l 
literal 0
HcmV?d00001

diff --git a/images/Replicated-Volume.png b/images/Replicated-Volume.png
new file mode 100644
index 0000000000000000000000000000000000000000..135a63f345ad997101d34e72ca63b889d8289686
GIT binary patch
literal 44077

*NS1zaLdaDuq5o#itsb*Guf~{mIj-{gkK&_gFRgx7>3!D1gFMCI5!ZE zSrdnmgkdr))3rRh1J9&5T!>@iW119)W5!nK#1Xbi<3iloXeK$u)>FOywk!QqFH2Zw z3d1^ww$(!CNev7i)xh*#HOvTSG{0Q|OXBLRu9x!pI-75cV0*Cuc7%P}pUs8+v2-{P z?&)+i9nLLj7)kdogn_#5Pl7Aqpl)>uaA8<%F-iGOI8$3V5iV#?*syI*2<=}3p`YXj zh3I9kM9p*poaMZh+N;T3Fqxg!PSb z*b;7Oce4Wa*UI2_Yi7;Fz^4;-B8pV?WJ8B1O>Q5H+u`sRP zf#C(jk!Ed%4)LTzGXtQL_BC`;zvj`4R9@=-m^slH9`*TDE*6^uwm+^xd!JC!i}z8r=(2*W>< z0sZq?(4jh(@`-`evJ^-Xb}Qi-0f{j)AZ1|&X(ux%%pMEfujj)oDFC*G#3fY5z_~si zPK;wFY~O{(hfBk5xYW}4AWYf1ECNdD{ydpY9t>Ax{l8cIhTP+#KGX&g!`kb9v3j~6 zqn;ijtobm;KRt|b?T0X~wHf1{9OTPq5B9_L+cJ#0QAOXPR19374{6_QNG{z2iG>Le z_uL7IX&WK+8R5_NE|9S>A`Ds&gB;EH__LbFcxWw}0)veUU>dy^wltTHq_N>#7Z1IX zP{^nIwM&dNok+g3HAS!IA5|O_*J!Kr)NZeRsYf`q4aG683trA}a~992+co&?;eObh zti+IzV&c6kG049VQY+FSu{asx^I{?95ed;5p%DFIBSgmgK*Dt<`q|h++ROmbMmmJ2 z>p<3Y7zSDzK+etxN)w%-m*@w@qQF08HqGAhy6)Lt^3A@K?t7Jp>oL`8t366_{Kn$2 zd)kE2&!`Qk9T?uvaQAWThoM_o2DNRq7)sbXAOByK1qo^cal*gFs0~DCg+pXY5QHZM z5Dvc{f`oYqxq3s)ZW8)f*rC6vF=R~i`0+4IUfjWmClj`^QAktKi5%4<`QY!bW_!Id zjfpihp3ji{rT4fNW)ExONIc4C)UQV0KM19rCD4yP0K*-`VFd3*U*B9vEKj2|2#FmW z!dH>}vk1b37YVs<;GanlB;1gX@|lQt6%;P;w~SOx*&0t6fg2j^qJ#06N>841QojBw|1Mw5P~ zt24#$=@K{rwYF+Y;!k#Hwbz}J*^~NGIj9;|u?Jv7+=bQ72FUsqcJLQVQz1reAnFwh zk*}g4G9wH^Q;5f4T*}z>5E$hH0cXO|8GdWWaP%&Gg7FvNhPi;#ak|VYS+PCZCr?Ap z?3EN>n()1Pa5Wqg4#JtlDZU8`0i_-M2c1C?U%UsRgt>{#je*GQNWT9aLNW#Ke_X)zzJU~i!V$0OOtprx5Q?QNp2?YiB>5r>lwF^Zv|7k!@=Z-)1VAT0LZ3ipMpp*Y%$#sFcrjL+fo2wmEsGX~Qyd7Q$q zFtYaP_s3JVx=XCehE`A|#w8!Zgwz(2!?4@c*unp-&gI9T;+u-V*i=6MQ&3QVyu3VQ zW@aKGF%e%bS_A1(#DTDW#OLK0FZQ}0b#O8?7AB-}gNJ|oIPTtvv%=R(5H71ueBDIy zdurw}OwBrmiNs;`^(%qox1Kr4P z){oxAK)f3BzoQ*aekiuz-Q}*o4VFa2jSNhK(xy^OPCo*Vyc6)qKgAcN;2KD6D98BN zY8*Lo1a!6u=gytu^B(Me)22-zybXkV@j00l{(+EmrWhC(z@Lx0w1M^9SDf`%qr2Mv znERrlA|Vn_;$qtOWaq&qtQ6z+9KwR)Ggv_U*#bJhWft3vzMHBMQqcm!^>Op&P5kh~ z5BTxNA5mIbis8eDzu~eNpXTK33|FtU5MtxOk@!_Uuh%68E6=ST<8#-%4Gj(I#39`z z;gwhJ#e&2N*wWm;xa=Gjmw$uBl^5}O_G#iC_93PD0`A_uiziQ>@cAdk$3;X$Kut~U zAN~^t3LpQ?ko}C#nEhicyzWERb91`-@GA4gU{u%xo76g znV2|nB9<*%hKh;`P@7_PNHlSpzVuz7e$+V^*?93dE6@2K<8#-%bS852wQJY-oTs<9 zH(Xs^VejY!>+Qt3#~y=m`URNfUWI1vO$^DqgEc2w`P?JpKp7AF=bwM_+k=f|#*eeH z%{bh{hYw>(a4esb6nOJZW8Z_hk2!M>h68<)M0};(?c296KYH|t$M{kql%H{PN2$e9oBh(N~DK-CbCPL0?h-r#(UkFV8=l@SOa2 z|5UzrmP7R}OPp~71%H#+<}}-QPfN&lL}is-#DkxP%+BL5*nJ#nCvM=(&AT{%<3}`~`xdbk2cf$< z2}0AiL2%M0zW;Pq_z%u;$zD!xifC_qW;`v;BfZABI?JTC??Z8TQ5|NUzj&!7xu)fX zPdZ^;;g#qg+Jyew>6}1B3#7LnM*pyb=o{RC{(;rd39W*8SOqlw>6{VmGlgk?AT%w6 zIQ0N018o%H#*#(h2N&hlP`qQ35={;l3z&P`oD~>`tN&BApFC@NfBEzhiC0=CgOTl?eF~ zPg2v1GWja(%s%<(ZPLpXpr%Nw4OouKBy% z3zA!bhZQy)@x zZ}PmJ4?|e{^hW<}I;<`z{;5>PII54>x9R*ZKl|8r79#)I|Do1U487vN{+Hu<@8YQ+ XDY15-jQ>~q&3ile|11Z literal 0 HcmV?d00001 diff --git a/images/icon.svg b/images/icon.svg new file mode 100644 index 00000000..b2f16d0f --- /dev/null +++ b/images/icon.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/images/logo.png b/images/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..6dd6a3c1bf34bf622c1dd231648a0439d7e3b0fa GIT binary patch literal 40924 zcmV*fKv2JlP)?rDMeNhwvQ4zrc(nLUt^xkU- zNk~E(>B(mM^n1@a?;rQh?j(RB9p3qUK6ht!=FaWE^VIV^f*NXgXCVL?zXBM*Redkx zr2-p&U%gHZHPrAvf%-S!$$#@*<+uOt_cgpui2uAw4K=(a5P(AnA*I|_2)VtKayl>p z7zQMPJg^SIa%=4(XWe|9ooAg}h;!iv+o|CVqel7~-foC^u}XWNkV<_4*y{EDQh;*? 
zc*$9NpS9*{Ys`ah^wPq1D!q-s4dkzx7OC+CC+WU6=S%RUEoFY=)Yp9`y4*(Q^ zRBBf*nOZA^NV8#Veq&S*0y=&#H(Dy+|EJJi4gUePk+z1n0|G!wb&9k0KIdGX=vfP2 zV}1LM9a#;vQL~1(1qMJ!xvMqiPHMiR_Y`WRui>qR0Gtq_5g~?weV)kSR+HxM5!6Uu z!&{AtR3C|Ro^w<@_BBuYdjvJo*YH+D00=3E2_Y1%kqkRf`$%)5Oc zHEf6)>1%kq;8Anny(dv4eGP9HDq*_ByL})vY=|1^Yj~?saoF_{rO{^M|7%kJJ%SqP z-=C<=KC52dQ}@zY+wDLKDcaucW2s?7yr&+N8eSJ9ku(iAS}a59s_!N$l72mr6K`Dn ze@)WAM^H<{eSbj!RtV7|l|D&GF$D;mwX?0YcRFjA#_LrE^LH~LrXT1QLQbkA=)H@_ zQbQGLlK%Yz2_U50O#6w4gb+J9$8zT=NvV$4Ug9C`CvKNY?MXy-%eynEJ3u<;9L_CA zh%MiRW2m8q4+Io|=O^yfo`2=WUkWM5DbM>~FPU1e{lv{u%8FXOadp{u0vbSj{+~TR z@m$q$)KEhWuM-t1zp0;0Ipuk05)A@LHge(#DMxDGzs66d29)+bQ?-qE($?1iN_(I6 zlBt{!B3U`Sckmc$sNsE$3W>h#CsS5=-gm1%k0Gl6ms%g=B~v|~pSUZoL{nM+ogiLi zKK+ht`^8fy69KnNkcL~@pwNIEZ(>=QzcjK6vel-Q2_O}{bqFc; zqw>#dV=#1QjkzZ*m!<#-?I-3+rFJ1w_unq!I{<6#okED=LWqg+=QYy5%czn5eF`Du zHV9Ffy854p6av~=yD}`7ws+RvqCNi+rM*+*@6_wq#(xV7;H-PpIRYuwehf9Qtf7V) z{tYVXeXEy9u8(bc=OvO^AqTzkua~fcgj3r4c5LVWxUvi0dO}J7?I#}h{KOrVUBAWq zsG)`r94e$wdx_+-nDE9+B%P3|;uZgIh$JfOE3J?BlPRZt|0*FwWgBlL@iicn=Y8Ev zrt)zq?8-6JP(uy>4nhdLL~>?K`m&cyIjQt9@z-xedTIcv)E<5^RrC_c2ZRvyZ(@6| zhRBI19bN0C^r(5;wn?VRD=j^gD zDDMdLD$jpPNVPc;@!xDn&jJ{0t`I`BA;gaH=e1|=T|(``zdsO%zwF~gec8S0&))(V zKsjqW!*Y2yhh3_D|4Av;G~RqH-vZKEH^(`*Kq-An{CQ2%*HFWMfB=N{6Hi6n`;?JN zpB{hrRwdX(>bwwjo}YNaOQr^5;=kE_R3;34(@P|~KvV{;8u4qW;ol(gwrlTmULxhZ zMA8YRc8|Y%E4|^-G+rUn+D|;}N5r4{CihdB8a&cZrmR$IU#dcfHPleUt3v>Sh>QkNTm++lBoe9L__>}O~tRFhW~(fk|YX12-)N%k}rCR zR@ICKTKaqT-YHmM52&9PCQ39kGJi*E!P>tGP@Fq~Z zcs0BpD8L9Q$7;`?iF3~d<5Ys_y333`z9xD}3dID*| z1cofet_AxtxH;fv0WYbjt}fO^jz1wVARHi^bJeB3Ypt+fCu*dx;q^iRMoKwF`-z#> znCrrzd=il)HbkToL_*pq389snfIJ)_KQgA-+h)cHzj1np$D~$|k#&;#q$H&Up@m#v z>CRi0_8I1N2FzSj4(6^4=Jw~ycY*tdQsOZHV+L#JJ1HeX2&}a@=M=RLxvvW~(%10c zKm(yvdS~rD zYmBKp&o!z4Z%`wB4gW1-;;Tco=ig+);1ebcFB1ZUki)E%|1+v7ar)?G!Qn|#k zrAxW*?t8f5`oAw;y>jKbLWti-+i?}MqlQ;Ojr29V5qLnLwD&2c_3vC*-YqarA-unS zcKiCV-`>4}kyl2+IQyrZ^|{jx zQn_-eByd(JshpSao$q;K#flZ`qVs>vMZZ4(7Mwd(O4((ttr7n=ue&gi5L2MdJYsqtg1=;S4WNXHM~)1 zB}HhgI%kXa8GDc zsT`2-JhW0MrBG6#r9#VC>I-P7uVYL{2RP>)xcA<{MZf?3c0$Svq?DER-8EF9c0p?R zZ=s|hw5Y>}H~2rf@#wa2WTTS(ffJqwPJmEDgcr&oDtUGqL}l?D!gEl)Xh$+7ShhY# zzdw@gx7mtvsf?#JQYw^`XjNGhQV1IA>R7yRAz%E`m-+N3PUrT!@9{o*?6Kh)XP)_S zW6XX^skf>a$Oi^B^}dGx1`=>e%8^L#`F|eM-g59}Dc4uB()YksB)f8huR>KmBYH36 z?}Qt2?8Pt^(n-w&GrRfHy_2~8t~+qXpp-@{iIUMGgv2>Z!uOdodk*{VyASI-I{{Ei zapg7Fm;(+tKu+CaizimCSh1Uw(phV3wtbA6q_5$>flyL7W7P$p-lo3w;7KXdl{ZpK z2pojM;Rejbzd9mGJ^!D`7-Q7!n{{RC6S$cc==pQ<6`e zc09M;dmj&b$MEqBmXNHMI2B2Jp>5>A^Ua`?A6#N-j|@3{*CcoS>o$sE08-#^;2>`L z5o`Q{Ff{z)7r)?(U;Gj{M{b~>hK5GI^WEAiT-1}YQpLWK(y(g+3yBwL=|$DgjhpIvv_mO`P3=Xoe0kuq}TRn&P0cuFyI)(b3L z_!3j6O=H}I2^ed`M<01aAGr74zY{{7>71)H+W3EsnxwDc)u5!5#u{__wCOXBzu>pO z3xXiW`Qxvct&iZUTb7e((ij}5SiSc` zoFCv4AQKSZm4UvRw4r-*VsD?I~unC>_tKl@6qh?D|9;qgMu`MQJeEfNy@R zgJ&POn(O{~D=m$U6wBq9@D5`w*1BkMQQpl!E=N~?Klyw>gE17WurBsw2t0YQA>V9*5JVVcF5zs<`lz^?dx}pJCaG<)qVTisdrK8jQ6V zW1=ON{LTSFMAba3B?v?L$ioi{;OU_~)legS4I9J8Qt|LZ4n5TQzUL}gag}ViC^Jr> z#L)i&s7s~z=gl{B$RQtLpFQ{D^wZB^;lh_tS}|#}&G_5Q&FNm3Ln#GDfKiA*K}kX& zA(UVR*eJoj)N455pcYQuZY_K6e-zn)K2qs4g<^@&*s8@ui!~Ty34##MPuTej7KjJ# zy?2=q;z?_5MT7Z&j~eM~sD=QnwKkzWf7+JQw-R9xNUap2;y{x!%8Zjk0|C~ON+h`U zy6ZUX&_lWH)?0Y&(MP!akC(E~hd#su_dUp%)_OX&IFzgJE+A8yuq+@@P}WfL5G4-+ zFCxAXU?dVju{+?x&y8l+#+mG}>wzqP`DI#Lnkkh_l*(m-3gJy8`GYW^R4$XwWbCic z`=#jb>-~+CG9S;auSt4pq_1H^h}a|E($Lu0+R@R0HAaX^`YR$XRlVWjc^+XH^5dWU z1cV?{-#{XjCR5))UvDp8_}u5{%MNhJM-Jv+vl_7jAlKT0+fS;c_ip!2Q zocooN*=PU5xbc?T39Y5Exsm3UCR$rsD3$~F`7eJ%eB-RM`jnK%8e!@^)Zea)IRB&1;E zCR_3NoTr#FMPo`fntfN5k%tjDBVyu%GXh~jIzhHO;IM5P*=J${7u^0JKl;>N{GfF+ 
zCT+DXqsNY+yQ_<*@4u6+#d5KEM@G$Go6U;2`qsN~G2g4GN%|UI0jX3^E<3Pl`SRs> zzUO$2Y__O$wZIrdDw*Q2!w$uo5YO`>r=L=T$upBDPXb{4#L3KAYNJG)vc#1XloYP4 z1|_`ZBua#R#mW`+-&)H#-~BF+ zKmG)dKlbPb>z#7i$JuWC?J3!i_V!^s)gz!pL~<1g_`wZYz(z!m$Sxq3_!05JgwUT4 z@qoQIYi92)T4U!Qgn5H4gtjBC6o^srQPzU=Rj84^hK(UylNLKA-u);OAz%5|g_AV${M{z}wH zU&F>Q);g3@x6Ph8bJaa}-96&4!w57M5$b|2(T_nd5xAz3apFr->t#oC#;MPWyUZO!DBEO0A;GF|0<&X&=RbH!DC&oOPd4`m?Si14AnRmdI^iS3X` z!iS3A|AvGgT2fK{+e~~u73x<=T~(iV@Olgy7YM8nn2i^P^o+{7a-;3MF9w2| zdjG+P0Gt$r0~mTZHfs=JTtT<8$I5HG3=%p{%^h;`MYC}S z)&6Q4#=P@nC8#(^aN@OHrL#`;Bphd{b57KQYs8Uuhy+8WvmLlHPyo>a3@pcNv0g;530uvpe@U3Nxb8a$@EgWJZ zi1DCCD=k~SglO=SGNpZ`v@Zwi1R%hMf}mt56~cVEU=nUU|)JXsKL9n6i_gbo7wPFfy-NPXPtCboP8uhQ;C#R+kZ&2Jhui*N! z;mu0f?>K8-u#QSj`m62NSZlRZ>b&*qI_nQVdH2ex?e1nXl!a^=FC}K{HtGb`$tD&OOckqt7Xk>q^3u7i3z{4N?#@6UEKUK4g`pN z-~hC>au*AG0o#L~nrZaLHgzQYwlPWCMkQ$*l_t~b$g~I=n;J=^CE6D#C0(?`=*;q^ z0bD4AE2q-={vh4IzTDBhx;S;+vI6Ut6j{48Pj6>&xijI@Anp``yM%O$oOQ+xX#npD z^1m-^kT-~T(r;TC(JT9WGdH)Q9=@6FNWghYGzP|d@RVKZw*2D>&E-|yMh;}Hy5NzV z`27>PJU0Bg66QC?ZV)No7y^(|Dr24d6!5DvKl_=~SzrH#89in+g+f7=ibY{7Q(K9= z`p!9olr%Lrv3TJ^_B!BjJ~na{zdCvp1Gxb8?|RO}lD;DKMYS~FNV-SUa)$^XT4+j3 ze(-QF-+#C#962tnJ0@iqH?@vYlj|5ZCP{s(Zxd-{wN{kEoM=go7b(@u%RSEt={#^k zxVQqZwFW69liT*B81}lQ{ZEka(ny69QaB-mD-;S0beDA3GRxwb{mg%|mt}JXiluz{ zVQ^O}#f{e4-dOs->*W6+lD_%@uGn;MmWZ#mj|$1n#@mgjqSgy&mr@J^(T1Q-2$=!W z@oG8eY!*kKb8fA5ZjH5Wor{CyRjAP8wbbxeJvOa`4UN9_BQr7wU4KSPST3T^xuuWq z?A=InTA5pB71XEy*~7}7@*lMdKNUtbrC)1eiU70_B9KyTZ%p`$QDesLamvR}~lDJR4;!#b`05Kh>&ccIuD{s*NL`8mFmtb z$d27e!V{CePI2Njt68#ZD<^+(6K4fRds3Lti7>Fj1WsTD#W2g1VF$3yh$FEC6vBRP zUhqA%^al^gI>SdM|A1mRz=*odd8z9m?qB*_Qo0fAEXD;ir$%zf7H9KZ=QVa=|NTyd z8P+VxsyPq$@Z4RUygX-M1%!W=ir+bBS5%Sz_1@FB6dy#=#}AaKlY%VHRh(B4nKN(H z!BQbQRNX8gKuOslrJRNk+gRjQp=EO{qp`FA{;O_nV@Ud?5-YTHFACg~*4T$bW1e-+ z6|2^IJr9ajlF(T1=QGA9zk1)7Tf;gf^-HS_C;X+$-Tv`mA}J+JzI2PbLUYn}UHb8t zi??_xJT@@4z!32(;i*d0d}s$+6NcfQz-LmK%wE%HY~8ly^y!QpHx}!q>F(`g@uEdM z``BZswe$GyUc)$j$JU7Of>*Yv-dNA5BC*@pGhexLwo1ZR!2%givh$x-vc)$ivg`iC z=|jE>6x=;vZ&T7ejdNk- z-x-wBuSfb<9{x)32qi^YDYZR9?o+nnL!n_y49x)27j)D~hBqiiHb^=elxwY%u0APT z+820AI1A*;P7D++>$6s@>N9eAw`o|}Yo_$&+!UNR3KZJciM(?Xvf4VtLc|t1$k1Q=QdvzePB};R?VtvktQT0mS{?TE5{F#G<7OWZwSz#P|k4;f3Sd^}E z$^js-E+nNJ5lHgo9;DDbx$;V;cO1&otFDR7220v&rV#Xy@muLGEN6ND^K8+!5BDy) z5MQP6WPAWu&hmBMvZu+XN{XR+s!&Oqno8@ z$w-+@3eZBJ5F4a)NTS_UEO!HiqIGNftedwcbdN16y9Zt>si$5pPbd_`nJ9f`BB>v> zCOkh3%yljz|Hj9yfKW;`&7m&2{ESAs*$AzAdK^|bE_pO(OlUiAoKqbB)jfQdD+-Y> zJA5g;YmaFQPWp$~?}jkS&GuXF*jP4*xCbt3G=z!V_=a_XJNaFH&yx896pZ5ZDa}FN6f= zkU~*3{Rp5w(N0(4W!7Zpv*V}+GF~g8Euw@+UvV{!$&q+6iLn8z24*v|e#&6D&pLw^ zKHbG-XqliTHHMzTa@II01wf(UZ4wd2cxE#dRSwSC_eWNsdh}%JM&ZH1mxe8%ZijHn7pD zZ*0fE8u4F+?1MzF{`}Q`>Od&L-{)r8bmtb*bvjb*RdmrEmSWI{mI)F%!$4^rU4@sK z(7ZGA*55<6w4TtG@nnj!Dd4FjYqBq*Wr7iPTQFDed2!wf&Xn zNeJ+?-qWDYG=V!5DUwr%c^oo1We=O2w$n#?GM$zZAh4Da4l6(?hm2b?S30%|NnH7r znDp@=02^F==OX3Z4(5~#l1d_!AU9ySX=cIx?_C3K*7CB_35mr1vRn?njdLZy*NSpz zyvvT+HFMgvUup{bx}8o*$&xiD-1*#2-IpsLATf7_*8B6f_G$juPxfmx-ThXzq$SI` zL;m<&jz2t`wX6F=IjP;_z|E2zym`uOJ<4|-4N~G!VGcvFHc`1?4vQ!kEQ`7f|C(Rq ze;?>$T8qao_iknShy=N!A+S*{ew55p3Gi3l#flwYor}NGcdrForP|N<>uNrE=~j%} zypB@dB4nh_YnfnD>+Y=0zQp?cVgNeoHshEnzu@ZGpQk-DiET$7%N5U`0Yc)*1o@zc z9Y>wS)+3JMx;bCrfQjE=Y44Lfvix!~UNhDiq)=qb>)CsgFVdVE&GmD?PMzP19kj*- z)&=Y`<}^mtZ%JQa9S^KLAL-ff?mw;6xBY%;V1jTL6NXRo4iNtXLwYSRq0_%UVe5u3 z{O#!0uqSV|1Uwl@(p+FsQjk)@B|Txg@nlc&{%dyY%Ux{Q?0ISSShx`^0P54B{Dnle(=kG+cQD(=T#t=fyhVJ5!K zqADyUupw9j8bQ*N=3|q8dgRB=VH&B+ zdB|vG4*FG>x@&g+4@!ogG}dV;L~AOUnDgiv9j#jo^X#u58Ia$^CV%bAy!NMAtBsw8kv@&3e^H<{w_|)`zzPD==4N1vB 
z(PCY!?!Oj>!sD+Sfq~{m&1u)H=E0FEzH#9+`uhT;6bJ-?EwS%}FELQ=WL)F6+`sfU zbmkXhZOD{i`?2+iqbQp^_b({`-vpUu;&u8zzE z1kM?Rkd(qKnT{#kdU1Fa6kJ^ zNZRd3B}8|@3Jn++Nt~1?tS4~~FE6Xl-qypcb>%*+YaCv$`RJ64J9W!CJ9U^R5r}+f z2>_`f95l2`R(uSk07HX00E+`Fwp`uGe69aREn#YVHsqW3`{&<;{b$;`!NKJmBi z;MrwmFP)Yvily-E`jr0lT|a1Q+$LsJ^#k(kzxOTDI@~r4?EsPABd|^$ zviqj>Hwc`3a8X|FIzG++?AyfN6BD4JXe?zHX)RL6=Ta4<9OUE1VKfUigmM|0>l8ou zS07hAm#4i>vUpv@MdH(mQPYk0F^x>%##+K1C$x&0zCF8a5rF|f|F)m@UWhaCXcv|q} z%CbBB>P|VoD-bOy>B2~0#nMK|1>K?g^Q@emwXPhtr&Z(kc58M|esqNS`Q9yJa@_aQc^_`!TMzXx{qj{@KR<`>37RqrEu+}FasQQI#}WO` zIcRCr+`Fj22{)|er0+~(#HJa_1ruu}4vC~}3xJ>{HI5biGf8{Rq`U@<4M^xZq>wlq zBkQNKsOM1vTcka+DWNR^f|S>Yr_!XnM)n;48IGCqE2a)Rkdny~mkgz3@^~^yz2A

PVH^Q2Z8ifN&!xua`cpxYe-2# z8xcDQ90lXT)qTM)_y5bf!V@b?qBSjD$`d3#!LJ_gXUfH^x%8<4`NeG;^iwB~aNpmp zNeoLV)tfgILyM9_M6XD?$eeY9pS>pG;~wlnyNJYt#Hni4>{Ttgiu{#^kJWwL6=)B# zWrwfenp0ZkCwHlL&RP4{yG!obnPpnqCCkuSP zCMj?BnIri44t3;1$3S3_3Y6!Nm3d=cOlZ%=z2_Xlj~CB@@8a(&g8*?$Y6g$@isM`O z{nG=?UR!42S4MN-W+{%jxs#nPU&(JC?V+#iXsS~*CM8-#dI zUf}*E7m+P>GOB(n0#h9H)EB}&b{%sXP03L_w){`*JK@VrY}uWX$zhzKH8q}(P5&c% zZSr{#1$-?DLs;DeW=9ULaVyw#15_<&16_MX40XQr-Y!%{vvXf4_V0C14F= zVk)UWbi@`Z%7K$YfR?!4q7@6e!cYkFo#j2nTMqb3=k7gyk(?awWES?t+3)TLK`2P*Gl}cObSicvJ$Q&^unk^T`#~Po0 zDX@%eR^0c=c6PXEEl;ls`1;Nb{PoFxmNof&e@(y-9$UddW0M>;wT_*}CTXwJu}%_A zS~1oQUJETn#C8ciS;OC->*u`3dwIFZ=ZxQM$(FmdFwheqm81~#V_ZN&W$@y7`eQ5p z!tP@~$%N)zSlInAWs@hN>!>6_Ivh%PBy~Nj24*v=VM{7S!kn{sGQsGE>D;&EBIb17 z!HDHk*=5WrEbM*|Ap%A6O45D~{cLd?GQm!wUzBP75HRgnO|!6s!8f zm%v#m+`68J>cK?|q?E7T-K=>$>o=kF=<~bs}#Zp2vn-%HT|M z7fbE9y|ju4fQp6#@sw8=&2^VzFzKc;NNJ#_V3@W^g2z8Qg6Y3p&Rt6ieB{&P*!QEO zky0}2@opZueI3_7-bdrzU2NH)*=|IFsqH>vnlz0`g(sk7VSO%SerK6SmKS+wZJFLy zkKIol%W21s#80c3@Pg2mm_Gba+A|Y*e(f!+&o3sSGIZsaa@V3?koH^X%q_%MX{xiH zBP$)WOt4~L7Mr#0O;Xokor%vWnnIh@^)w|rSlag}+l@Si#>5EL_V;qz3-__6r^L@b zvKP652`pXv9CgVINnevK4RG;;zv7d-ex7|MA51dY%YxMpxZGMT26{@Xg%m5D9rTpE zt$4qa-UuNSPMvVfv=mwjIn=sX*k{+2MNqc0l%OP}Y!*^~yI3^!Cr_($zu2oqwx<<+ zMH}Dfs;zG9SrQvAQbInk6pbaLG)7c;<^>T!B%o|0Idz5bl}3$KF?(QeQi(LU%7&an zxVV(Gt7sEqD4-)Evm-PPEd`e!-OA1vud&OzjrO(uTRC)nKmsUvTJ2T}~e zUCh{{l|#S0DI+J>k?ku}E@hd%dn4QL-9}$miA8g=yg0LuD__jg|56^4HOSDSg`{i) zTtbm)&oE-!CidU0g=yP0k#5lB1`PRZh}N2X(93RPPiJ()RxIs(isPo9&(*U(Pj_KC zzDiRF`^g2}crr=lM4Yp+6D}Yh_MwELGq;eWZlEKxIV%Tdlk)1Yrbxl8Wq94@%v^mp zVbDY9vaDJ+g2xuECG9uTlPj>SX9-)6ox#kNkE3KA#u_qy9gi-4fW_+^Q7ZPdi}Kq|KMDRIvmE#KX(V0?aL*RwIg5 z-`=3*Ru>%5!YNzUktP9|^J^34+j( zN(xj`^26)8`T65{_BeeU2Yz%k&N}+L$_Q!MchfJkx^EsYthswG138a7 zU%Z{8xBN0coOcPEj~T#|30C#w&`RPdi`Fn_^$Tn_cE>@}Oo~XNWD<3(?ps4s({eU# zP7{_Tb06&y;BIq69X#K5yq`%gA{AF2f55~fW16(>&RMAhcna53FzWFYrCz0Ez8K2; zH*51IUv)@Zxb^Ua?#Wvkd_g7^i+2nCMMG%8Q=&QxW$-p^KyF~5XlO~QS7-s%;kcp1 zoQhK}uH>jK=%_7ow0}udQ9?yzw{CE-a24tPiv0{NT0(!`a`@D=ICiUyxn^drJ(2Lv zE|p53ms)?*7%}mpeOtozjM9B&i}tI2zj5+VMdDZaB!p+Hh+i43B1v8?`JI)7WlLR? 
zX4y)^DHp6|eqD+$UcNPBH?O0=I{>0EpcV-RO6wTiv=wu^ZpT_jK5L=@FTQrEjE7Da z*t~H^w(R%_zR2*{%U7^$-AocX4H5#EC9Rt&g;_wv*}$NLPg8OvFRZ(jLnfU=#&5y8 z5Qx0=g>o-5MjTEt=x1T~BPi*!$GA_jpzAJXj5rKOz+DS3Aq-mS%_iy18VaR)9xpz` zrcHbEf;c*;lnpsA$-sdR-BH+;lr5lzJ){xsQ?lI_zvMm1A>#0W|A4E zxcP}ZJAQvHZqE^X_b)SOAD5zceL0#^DTRZR|YyR1V+#CmcKNe410E$(7d=m;!C-O>oZ9S6oHt z$}}cM5ZVA4$L;sp7m5h2TL zJ{GL8=!7(n-MB`Si{*a^=~gi`1^jKt`<3)k0LIA=ZPMnA*?pWpsM=cxS_|>evNB;g z9Jc$|(q|2yxN|+*UAcx&{Hu$Z>jD}xlC&oX zW6yE5gqH$B1ErwKw%tez82T(GuV5(mUOJE#rz64$KYEX}8?L(n(W{(i8}Fy;g5(3s zl;OTOc3MV+W!sF>|41ce%Qv=f6oGLNvG_8DCC|+Z&Y>;6DLs8{JUp+PY$3_&HJftpoO$3BDcyh&A)bx~RUsfW zlCi@Qys-9PT>a$DcuJwACRfgpEe%BRerNgX6PJ=L4shyDpX9E2w=%wEDrbD?5FCB@ zDc`MlF)NF+;Uj?u07P{9Y@Ee5?0&5(fn2|wyl2XZzIAqhb+p66sU$)3s?CVto z%c~Av#Ol7qX%k8O*v2>3;b#P80f&8kJ(n)@`1aqnVT)aT7#L34YoctkaoTG2o*e?o z%Y9FyrB7%}jBDJMQ)gVx_9Ks@6y{JuVQfHeaW&%_x1*=Hl43YO#&4l)a*S%&l2rq< z$p<|=yzEkZoo4g4eRzDu75FN{hbNuGp5s4F!K`B-Sc<2Tk-tZT{Q2=ea`j`6<7|R* zelun-TgT$g#T-2CU=mv6DUERfRKlfkM8(%Id9-Bd>ISrIL@P;up`V>M*^T42J(*Ha zLbMq^rX@l#n|SXJiQL)%4LN{W2oq9mKh^T##w^BgxNjV(LY4Oo&& zaLg8IZaJZy2TtukY1r+G)qLS!-Q+??V@3`pheSJ%>c531o*J?-cU;w>#)5h<7-L1` zxU*5+cNd7N&noK={cf;}gWGu9uY@FW>TNsHXPXf|INT{yG9s@0^ct7)KcITQgDJw% zYgh)qL9%R(WyjOISw6O%bN)JmWTRo5#!qnIW@mBiwDZ}#ZEs59zy`#3&f=>KOL`t> zY41~bDovd~j3-uH$*{~se4QE0mYcQqW=1t^$=>5XkCHx}`Ngp(zK@JQjD*fm3Ja_n zn9KO4?J(rIX~9=f%25nL{`~ZhnESHj@p%E(3i|V~W_>FwyL(yHzk+SX%^;mfaP4zf z&|25dr}y|0x6HngVo)6H%xj&Yuh7rL(IpnF>|uev+bw^y+wzP)&7hxczAhvM+9TRpBjwvAg(Z0FgvCAPX`6@PptOI;$e_baKN&P6JI1w&O_AtHM_;>k2C`)AXd8c#ykp>;@4aVaU?!a&Yr?(zZ7z2iL2z5QbDd!duwzLAV+=^&8^ zShOlmArc{TR?lMUi0LS)Ct znb7vc7vvqW}U1|8dCg}@?( z;J32|*mHb>39Xu3;P92;krgEbkWhkL;ONR)b{y&R(5b`u>Ap>T|NdU~xNHq`)&>o&vwUnz0$cc*Z~ zCr8uM84%hsZJABzDXhdf%S+u4u|B_)Er#t!Da;OruYg$kt9bX0ly0OH=IAf4V^qU5 z0$Ys2a56xK9oaLOkhj*z?-usR871h38D7D`qo5iZVh#vN0$?=!xb9{d5 zdeVs~2S56ez#7Za-hew^F7WAX>M$lM)0{~Ne)~+8&)?BQeJYZNO2R32wR?Q%vNfD@dk=NKq$wro&zm6?e`8WsOZG~ts*zP)T@(Zw${NZl`aF!+sm!jc zNPb&QcCjH*t6rUp2CfzTAxpYL_8FHPx^k~Z_UdD;ydNBRs@?|z0*Nb0f}AAL;BocU zWsd)e;o}!=#ZLRT(!H*X)B-6rxuA=>#4vo7Mj%+xKa-JlQwLL81%l9)*dWT0SK(dn48)Z<70`Y9c$J^0Lye

)rvz8d9N^1$^w6A^gSNXA z(3Q7n2^W31mD^5e=e9X{w)xFkW_KDIJVB{sH!R*5&xd!hN+0!Li=})hqMqv;w{xnH zDv%IHi!*YtL+fA{*Nw_QyXrw8UPmdxysi+8W9xPwYlg1Ft7-{V(!ILGbVbTXeg_F< zMUYn{>wGT%bCEOuAo$YdTQhljJ-uB4Do$YWWRf-6d8EB&nvx?an*u$B6@xR`076%$ zB{i1j8ah#AK)WAqLr69t;<1pWDt=5#YHnKovxnu(`8#qPKu*8qi@}+A#n7>&H{|ELH*xEW zc|Lh-H!T?%alu82FUCPv*0AjepC?Wq!AGZNIP19^IJxJ&zOM4b@*Zdn%BFS@$9FNb1 z*-Hd3ttzq0CWmn0v45nst`R7aNu()t~$l`EM# za&wA7X)sf+6c)MR`Dj9-w$qs$JZ*oJ3Rt)*U}@JfrjD3KXhO23 z0ebR1bTo8eOh6Dq%1cmR|1t@`#Ms77DFh`N+Y;u^eHZANOkD_W1=)yQz!jbW}mLBK38w^7of4-`b;rM^5XYGcVcuk}e)wSfZg` z;cT39URC{uSYo?ZNcka09)XOVc@h`OXk9}zNSJCVFE*0)RsMsjM`&qpNJm=Hl9Yrt z&L`N&b{~}JZfM)if(ZZ}o4|I*)32>Li>#^q??^VssglF$vYIxNx3(U5FlU2ZmQ=}C0=C3ta}<++z6 z_s-07>)b0@(-W|!JL;F`2pH2clC4H7MW7ZPE{5A8KF>0Hr#kP>-%Q>cvn9gBa9gVMiN@0hW zCKQVGB(%@n^KWC<2_IrueLJ~wmilBJ-Pt}IiV>}_;}$S}xa5X8-y^G5I!`9TYksmo zUH*-^>x%+mC6}<98VC+k9Z9lwe|M_QKl0$;iJCW=!T2VhMe9PYKd6~cKRCc)S9fy5F~i7*j&e8@ z@&o9~TE;hN?)_Lh7d$b*5x-y0Cw9y5q&|c8eYJ|ep-B;uztZ+;ac{_|Iz>WDN+r9YdJlsWc3d>64}`#lQCzPs9hoc- z&Mh%-eL%KsnYxKjX&~e)H-~)o;%T(DW%$sDuj6|jP07*R_R^18ot;}XYb{F7zh%J> zsQ25+2J6sDk@P&A6RaQTr&Lam%_mv3d@ft<)4;OT9dze=@RWzIVM0ee&o8$48k}<| zsTnBt^FOzp&1d)c3NuD;%N=uXCR@t#**(6(wqtiBp%Y9Qu_br*EW_7Hv@%TFL@;Yf zfHQTxuMTL%5KFBaKTYBz z1o^F)ztS*&^;(Kyfo;Zai**GAiisl?tGXappeof_49-?tu19uF z=GCV#Z|z*R9kT<*g#-qsj(5Dgu1N1d9a08#<+^xc>7(Er?e!fTJoQk1ddK(p%+;p= z@bO*Fq`%M?4GN`xR`o4s>WHle>e#6W)|Kg$L|I3b>c{HXhMhG%Z zp6Ojz z?2H&x?`D3xy)0P!B3lff z3NFcJqaC?Yz_ek<^M$>>z{hs{B*q!)5_ObKnc)p1$On0P3%xWX8~MuC}yn%F*8DT2ctQXikkL>3fuIj&-?3WaL!lE(>|!g&fW5P*$vM;F(3896RGn z5pBF?w4H#7PukDWkZ8bD9xrvy$2rUR)=kNk3P_U7UiAXwTF0=%#74HA z6tekf!>E=FEzQmNb(-}n3gX6JE>=H3`8oHmOO{-h>n-dgl{>>4`*K{>wsMur}|ytLS-q`3-buKdFbSy2uh8BemPE8wZMWlou% zA&B$ut%IZ|nBN_cEjhOB@F|uo4M|NyD!Nze`pfk61$=$iCjNL(8znLXF*aaw$L5s6fc`?Bo_r6@=~mLIkZI$R3~OD&JxhMbgb|X4Mn$I4C$yGX z53lDh-+s~k;$zR{?!0WtpZnJ3caq8;>#$G7&bs%Ubg#zyh4e0xSaRPFZIZy#!c}tN zJtfRvX67lE!|MdDq@=s~vE_y5u6!}4l8xH@b#9Jv&5G?u`{c?)r1!ljC3jRtF}z+= zifdkUH29H<8d$mq%Jk<9hfGTG@W~zg`S~0t{(U_OC8+ZS!A4`aC!?a3xsv0TA8z6N zy<0f*@?L&)LqD06LRo<+6G`z3!3P&MkoK<9IWJx|mhc-p_JoV7=(HphO$o`0fiMc; zzXtoAiyCT_N|sBW%ks@#8wkr$%4u6h@~>H0o|)RrAt#My$&wPQx?uJa%dABiu6b-G z%evRH&xFrn!bY|jKAk09^T{MUq;T{Xd)TyXGKHYP(yqmfZXQcHjONnUCmQH4^s=UZ z6;nrSNwHkSQ-Y?t5-uFh+P)qhT68y$EqRE&ChtozC}6DR&KGZG)3(VN8&aQa;Kt{# z<>nXuL0zIAV-23tNZrM%zQv5lY=c&km3_>3=8^JKXcg55>o4TF>&1IG zX6uiE$kNtWqCQd2=waRLw3%e`XyIC0oy~enLL)GXW@owVXAAk?kIl4y{?Z)x#4W3r zlyl`NQn^zR_DLI+CaaMBJty6(@qQq^0E|JTrINesu}Ok3y1*ngEbk5Fb#wAo2=_Z< z!CF@qgrE8L!@YKSXIb2|Fz=3=md4X!!#g41-lZjW9hE?+hy=zt8d93(j0Zq>E~HR2 z^cNiC>orfGHUcFCJ6yJg`CTDxb#gElzXBy9m3sYv;lN2r9y+yyr{@G5^_%q+gQ%lw zSl*EE8%X~uq7M>&(1Xulww|-G=RH>Wt&ME+6$2qXdE;hxmH+L0O=BY|f9;$C^+~}# znnty>nZwkP({WgmKJ;cimUpjXL{kT? z={9DsnniQE1z#s91w|fT`UuVGR_YTC7;Df{qvcR>fK);-P%u2X^jUVFxC`~kM%MJL z;rSIaIc=xUv%|POdAVx^#u$8Unb6+AGpjGBK4rM*xZm7D+tthMKJx=xt-VY%= zDZn@@wj4Xm8$GSvbH%_(=OCSs=He%Fva>IEP)qwv=qfGK?Bi?tOJCpr>NUUUEn9Q! 
z)U+y=T;#xWQGC8XZ+T&DnKP%>-1vj49#(579@Zk&A^P3Zz*lM&-u3#h2 z^sA-RR%hJ3!ig6XzM|I0uhxL?3;y?seu`x)?q6PFZGT8ZoQ}M)Z6A-M{C4I5pWU%; z$RuzMrv=9~!9%~F$3i4>L>fIZP1C3hbu9@JsT9R9%XyD{3!i${_IBZk6osNeiX<;A zpUZ8tZsDxG&tX~55}sf29BF?joR`oE=B$36y(aHVL#mO1e2Ax#EbCcBAtEo^nyi@cYKP_e@CcKjGgAgZB!w z5`@O7uO2m{@#oi_&=U3*oYn%Y#q4y+IyHYy{wRU@hjoLoQ0++(23Fnlk!kgZ-gIJ{ z>CIcEL`05yKbmB7{I%6*!XFSSgD9op&pf?|~ysLl`f^N|Xzhh<3DDV&rT zCkR7HPd1BkY5H<`g!3_mc)kNz>+qDv*p>+_UcV4WG#RRF0@^ae_}Gr8W1QidXa36C z{?#PB1ch>*twwE4w$#V6o+Tu`1X4J(iUx7tgQO1$4?I7hG3}8_D-sDo$qiVggw6PM zgk@xIAe4QbWjdFaSoP8XFU?ua>V4*(Af$!sB_MyAH7-;8TbeCD=~ z4+|$YYvn+^xHbq6y1Y}b83?|nMDPpq9*V{PJG>uAFSW#mR^Rx`!`cq}@^1BE=YY|} zGRpqt*_=4#+V%652&NnBf_Qxg2qlHK&U@m|$F^>L@{Bs$6W7lR4YXt=-?^ucd*|i( z+y2d@Gd^`mg|TjMo}-q6HGL(z1_G22G^7>7n-hQ|z)_!&2nFA`ua{dEb zL!oHN$9V!3?ExUL(3+C09xy!8S>%wdlSAF+sXFqFQ)LHd>_wl8su_Hhkeed%Q|x-h zn(&2nq1GbY-#JQ7=m-9MP^&pe>@z+7r}uBxveLk!e=c zHzi1>6@F5o;wg`oXmE?{`DARKI6zVk%NT3oi7$eL=OeIaB{7unwIt7 z&bp{xf(ab?Tu5JUNU<*@yFR3ARf%=Wb962*(7m!4^sO(gDrSS1z|IEo3@}p&_mYsV zL|p1RHrq7dloZBT{j0Gp{+Dh#r7fJ+?kQ{Gxm9Iz@SoS~?p*nEN`$|84dPex>|M>q zgZDn^BcHtxDWUv%_kXfu?9O9+J5X{mp@jL+rR&twFXuiZh52ojlve_b6KdDtP05F! zIdhn8PAVD1LxQ!?kdW+p`C9ht@HusIl7Y}Mx+O^}F+_ZbNV}@1%s?SPNkLsoF}yj6 zBkIE@71Y%!9$H%9D|hxVWthizcW-23s|FjDuPVpWei2dejmCt;_XO*#p&=uNs_#_i z(N|~Axee9%>TJAENULq;}R4SHE6M$6u3qtB&|9H11zPw#S=qVuw zidOnsFsHN3vA1>d$Y~v9bd-ElIdAKr&KEqns?4Wv?dGA64j+s^c^Z5zBBI58Ef;r| z+4izE;=H{ZNhraBu7Fi}%i6%v>jZgEU=k9SQKq5UD<#kiFi=(eTkQl zXf07%!~?L4@0%NJ@KGunN<~W{Z^-o<$~l8880Ye#4F*Dbx&jpp1f-2~D?0;`Ed=+0 znFTBaw-8taV!4nMqZH*0NM5NET%8ROT~3KH*84$Qy?4$vC$_r1#wT285l^lt+e81_ zsk*b}&namt#IHwPJV`BB)E%&-JG95{-AY0^+0$2M zRZo%j#sm#1J=j&nIvCL$`Dk**kiL9~aV18z_$U!QD1H5g-A5;R=#=67^wB;}zHvR1 zTQmnvOtZsiKTd}o8e7+74d>o7z)9PrnUV1*zKV(uky;<6=dwXqu0nhp+x!hl$#?JX z4gWMV=OvT!xl*b0IVzR-On8BW<=?ok`}}_{DvbT|w)N~eCTZ*IH5cABAP<hhb`VA2$dL)n0h^{oOG?sPaukeXz&Zj8Wg8_J6hk+7Nwk1UUlA?9 zmx57A;o8&6f@4k2s`*`^>dqQ|ymyn^c2q{R)oc5&MMd?|8#-Us!hET6Un4Wh;p||! z()dc_s-B~2IKUcsITXdkXi#R;Cyy)*sjH^zHi(pT*H(+-?cf7vakw1s^Ir_(x;@=q?! 
zpCv{3OY1f)Mj)j~TIbEZ{$s;7Ibur6_T{ZqL=!Mt(vsgkF~B8H<#_HBqsZlrS=V2d z-Pu4`C+KMO7*^+FiE7}9*hcT{E7Ow?2rSg4G-F#6QTUIjXQ@(<$w<~`4Y#~h;FkG0 z)?_U$X+>*Bk<@}hU|BU_IA&U!k8fW`rEXsZgHeF0gqsa){rIsoCVKGGzT}(-dcz++ z)T@(8xwu>|?`f@F&QMDX1sJVlYiQ(W5%T2Ch9xHMH73FBOAGw-_%lPsAdS*aw1h8%&z2l>G9wfP}O-?DEb)-@LHE zZ3~M$x3-+`&WDc{OHS^!X?pM5k8g9u(20hGG$-89rLKOy?=&UMpG@Rw538>3fBj5K zL5S4*TciH`08y0N&1q3Y!EJ7 zUsAorP_)*0jBiQeD?t$76)B-R8?vUaOwm~ClA7^t36fe6##u4OMUJFoDyq*lYej+C z>&mPv8iqCajBWPVY?w!Ty&@NP)~V=@gV@N6U+vg)Cbq|YNiMK_;jZrRvYA<(Ov;7j za``}O?IK=TjYR=Q3J^kOj1#-y`1*0v8}_;Ss5aM~i>e4$$X4eI=5_{dm&@0ReaEM0 zOiJ5Ru(|%S?ddJq)jb8Xw5MnmyB(Sy(OnAY0lO`#uGfg zvcxm11CE`NVpM~6#yXJ;+y-gQV(`y~MSiSvNFm60!ud+rl>?!?ePK~tJvYxo%S**_ z$=nb2I&gP`U8a@vLZhBLd(WmRKm1UW8z@=PU9@he%h&dF_7}Hw&aQDo>iZjhE(L^2 z$#J1km+rDjYTxq?Xkw>Lli`}KP@ZvPx4dP3e&Lki{)8{=P%rl#pJZ4@A{=xVEO#y` z^1}!F<}UBaA1MU$-)V{eUMIa&Qedoi-`_skw#RYP)21iq*!A~o)q*v}GX&wKRkl0~ z4H6ffzDwh0|8P`u*fU_Y2AmTpV4&pO_Lr=6E4mBQz^s9Wr_wlrYRwY*GGNEjf;l(Y|*f?w?uc*knkj%wk2puYXWNrOW9cmLPk!e_1S=R z{bi&QOzcRJNl3Eg4a0kbs7SVI8DyjFSreHi2~Y6YvXc4yUET7TRb`P(iU-T(@`={k zmArQOB@urYDgja@9=Q3$_C5BUkT8A4h@_QlK2Hj>_m%6^6EEk#3FclP53UR70}2~& z*q0DG_4zZlZ@S=j2e*V>d8@0>%@Kt!GrlBOwiH8N;pE)-64-dKOKIWKzOco>@#LzK zx@K;Hn_nuhygPUv>@`Sxy~ZuDLt23c1WKtrlD>ccW2d&;O`5gJc+y<@Tuy!RpPkn$ z5gcvYTjFJpwJ1So)LH4IKIgML)Hi%>w|a&(=<*XwivIXUO-H>7^Cc$&j9<^Z5st^zSI4Y==(&ZO40=^b&AV3a7lp%7;ERY~+rcrowY>?bScLv*$S_ z!ySxu8#`N^k|JsFU;O9C+r}TVdCKr+=fdb=5t|F8 zKe^M$)NzMS%8cA~giovIC6D%Bacmm zJ@J*53VQN}#oZ2gKyzn5jH5lRCNSHe#vMKlH7g9n9ThTpVDqai>&N3FwmBf;g{~{)xUnU=UOGh z<6KpIKVB`W-tVG@wM6CYgV3q3eq)c8bAPsX(Z-m0dQ z3=h6~y}oNf|8Yv#YmIwL>eETUDS@*>PIOkCJ+j$9>9iSX@3a|p46l>Xc@t@9W#h<~ z9bLufk$rq+z*p|*JgHO;{uVp;-jQ(Md)1LA0qZG2VAMC;Q*!tg9lm{jX^CGxIDk_2 zOV+rMI9gTtjR0e;*m>+QJ${#QzAeP_?JG_(A+WbBC`zoo1>r*L@T9;B+&p7~uROLq z_~B#AvxoS;cl_2J$q(%`!q05ks@bgBF}*!uo72i-5rJ`XOrtNFQ<~KSWtR1nS(gi$ z*qWd*8Kw7C8b+89T2h+E5sFp)0ZZ4H$rde}wkBdPWpwZr=?0OFl2QWg3oy7@>&o_u zIXUsC*;&~+VDJ;-uA~aT9t38l7Lm8eK{z;Qh)NyZN`r?X+s8V2(s5JLWD?SL=d9EM z$_Cof(*Es*oc{IW{Y$m7XND&BV!I8BK5Xph8YEl#RaKXvmDH;Z1o1g*3H83PS_#{g zx9aAl1^M?E^YZpZ1-pJAd)as3` z^MrKZilHlH%eFsPcHIMIvnE%v%W!Uvl=g{OOuS>c`tL>3YarB0)CWfYVakZaahD$3 z;{U6+T~S9hc(m7P)((WM&xW*TG*W;T0#6fH0K}Eb&c4(g zx(AmRBt$J`-N#62OLEgHs$b!!Di#U5d!v0-L_q=t3jYTSXxAxy0TjapB_GRnL zg+F|x|63m#Kg{IHR!tx7o3HL%Kl}&x2ES0!oEB%%y%n~!0c2>M5V+Y&+OL*M=3BE@ zm4-7kFc!dNfg)6eN8g2MIqx;nYas}wl#@fFuReN4dh0(N+hQ7%(jD`s_5RZ@mmb$D z{CX^fs&AGcDy8Kk14b<94&=;rfp}(ZSnImPb>UWH)6=Jq^cdHq=%`b)q$Ks8i0cFh!Vt1$OJC8kE^AocXIR=B^4zL&Xi*nW4u63*8+ zErqkzi8VQ^9#~dVcP%b)&(b2Z*OeDw%gH~*MdIQ4~9xvQjbcWxZ{c@wYz4rFMN0v8Cu@(nK5;GYdb zh3|O0{=GzcEd`+yYHwrpRp%Vo(Efu18^iVeMr?onTK&wj(oKuhtFrakT=sUI_OuiT#%of?OeNo0l(5 zo3~bMb2DNFh%JzEY{HkVjR~om5`w0Lq|uk8JV9EE$O%}sgch;(cU`EZ0AD(cbCi*UPGDlyH&)0c zI7w8cHZ;f;eJ0VBH&I)znDBii8ytDvdT-JC;`vIN-~N}Wq_M)}o_~IAuqq$85p5|U zWUR5^z*r~pp%ZI!Ml9?JnYlLL(G?}0T3s%z>7Rkdh~bv?Xh)v>LBg-~U@% z*8$2|+as_){^_Ir7alS>X@!7{FT~IHZFC1;S-x1f@^qXlFeHuN3Rq%at{ckpsvJ}0 z_0`OhcMR_ZM_yI7lRhnz^e(yNxF&t#HW`C)9Ct;xy7t-ZeZDpamqT0R)$Op@d87a5 z_#NuMd)3F9!`=a>HBskN4fGeB+xEBXSk+V5NjUrDtC$D>@faDhy<9xACl+Y}hiEb| z94AKO7!6`1Faksqh+)71Q%CwFlpvvn%at9oR+R=2?$1K7LJGH3;+EjtQfpnOsZy4O zK!N(mq!Fnbp8jfkSno@H)zf(%c&W(Vlah>UQVh>18hmNfp0vJhS|)#PhkCaz zYh-)9G9SOGTV4Ki-+4-gUp4mKX*%(4;oX+>5^!1x6B_k{v8{=7{`RpJx9fP{;+#19 z_I`Qov$>U?HYdk~f32HiqORuhzyr5PfQ$-jRlMNogtRx!&C69i!3#>bXX5@8|IJAI z3I~>rz1V<@rjNN^pjW^=7#a{FKZE!EgEr3`aLY-A{ouMmtQW$SDl-dXZ+-Q7z7dK3pN>lz>qTuHe(yL1i}B_+b@4| zXMew^&0#_KZ)4CBu%Q!^8tc@I-6#44fs>V*_tDHbXa7E@AaVA(D8=PXhetLZ1R$0y zm4Mw~F+``fNhI_+KiI3$=1W$klr-PGyI0>ZXW%L&&0mehXXs(8mU3ZyZpJt{CY99t 
z9x**BN}*HI2{XUbXe~Kcir^1HS#+!|F>GzG7&RM41EWEV0?`hl9VMDQEm|_3NT)rK zs`sUOsXOenLHH9T&G|9m-)M$f<+!Dcq6TZgt=afX7v;B0FlmQ+`G}N8!&2gqvn71Y*1(182nDjU`JysB~#y>eE4?~?-uD}54~&-jJoEC zt?Tx??8N3UsnA+tesXueI{W6n0jYeBLP`qZY#{ zPq;;0p}J>LF%VIF7T%r_y_z%vBBxbo<*z@xW1ZM`gl9v8z2&8X{P`pOFDq$2Yb-K8 zuh%*rsRY(IdDzaI`spo3cww=G9w-@{w1{x1uq-mY zC0ExA4Imn&WQi1ZU3?60hVT{GN-^B3rE*;=ukVH}D~!cTX}|fKC$kTIY|AvG8#Uc| zt4`fI6J9g7u+7~IvS%p~{_k6qwDX?FyHPnu3qh!)oDdrQz!&yu*zcN;w}hTT`&yW@ zZ|hfQ-`v-uwfV3K%|ovvX~9V$0J;CZoBJqF4CZVb=kR@D|2eNnUp9P12{)g2MvYTa zV2o3r-n2cj^VfE%56cF*qCd2szO_d<8=Pj{V8_ybcPut0@6m^EnM6xOLJK=*O(<6P zg{!1wb{v9_CfGQXacJYvp+$$*DPx_m)*0t)zrd{$xOoEmgtc~E?74m`?B{A!$h}cX z&v#9Nn#i4DAMVKp*Z=g<0hRKEu{f+lox5M7%_O~Z5OT5@(yl(xc&AS=nyetyj>dTp z{^FRXEq{GA zDm$)zz97KoJXr6|OaaP;56FYSKv{Bj?11E%xCwnBMdu&-r zJ-ec`UP^aUtO2~;54uni7aH~Jqo-$@j+mNqAl#209?*9$$UUN!`QGdF7R#tMggAJo z3BEtEU5BL*5rod&zpwei5_+J!Ukr@8zVL5ug~T&p}9_Cdl>Q*ztQv1@irEN4^igdM-jP4rx50Rx5I4S<7e9TVuTM+!vCy~*;r`Qc#HzhK?BJ>Af%IS6)3kubNZRmW(5lkm?r)G+L$9xkAX1 zR0fF;DBkW9i(;ipn9%C)ZZSG_$vt0fW4DREa}M$B^1$qQ{(AlR;=&c43J(ZE+sS_y zKQ~c*6}iWUHuIwW$td+TL3GA6S_ef{MPZ?_$!z;20Ukwtl#>p|Ml-_yY&3#HGC)2*P zk1j6LpABD7!Y<&=PZ9n91P4fKY@fh>ROCd7wBjj z(*JwXSK_JqHI`~=SG{egm9U{v7w{EYRH_JgkIy#=Y?d1X4+ibF#%4eTKFE z6;34Xwbs8iwEAb(slAjG2ndzL21Z@*;Vsi6KE6}NW^>k_aZ`_8(-&MUg}KJ@AMNr6SKgu|DloV7Zbba9P??QHhxLJyjSRpXg^SvumBnPKiMKh)P zc2Y=zuc1F@x$Mcj5W-y)tHE!?YcBw6taoaAoBr|12kDU?n&Q#oYx?>GPt7+6|Ko|` zH)bs@KaMnCbB?hcP2P#WI<(RFp5!Yx_2`Ed<)8MH`Ep?XD-FgZY8*d;N}bAx@2X#sE{3@ z(bu2#;rd;FdQ3y89l9^)!p~gMtFL-Iw?u1mf-&X^`^ICVBI;Hncieu0CXo>~TS8g~ znUu8GKAo2fR+nE=%KX!qH{_|7Lg0Mg`_ZRQ&}aYfGamIxKcGhma!OFv96Uq1uOCn+ zuXwny)7jS#Jk;4|f{TxCc9X{X;tzLc`OPB(eV(!>2F8|o<$QtHJTJf+C)%{9_SkQW z1VP!UOhVW@=a%H^?(iig-7~Sd@_~97-yuYfI%j4VO5*2Vy`%5!yG|VzY9(|}!N`+l zq|M*w6h_`OD|?=j_Oy5|!UyU)zZ&UPlm{@ zcse{e2<>X#TxMK+BjipyPSOzfq}K|HW$S+ZKu#j;ubp+JH?p5-in^ewQ5`&SJj|Kz z*tErZ!|Iel3v2BqOTq*G@mT4AyJnU+`-nPD*ez`dk^g+EAkMg~Pp#_< zF7QaG7efGpd^+iAbhG67yX&8_M zc_mFC1))G#qSnerVk;e1t;9n(PYa6>4D>o#pOVgo*q_~$)8F}ff5AEPIVH?x##SoH zy(yJR3W0S_P95EmeEx->H>x_VTv!ys1R~twN8S3x6~)g8!r#6L^I_=1DB_aSawN!o zKpdJ#s6D2Q(e1lWO0ego1lx}GAs{&Pk}mJLmy1^@8J-*$Y4~qq;sS6=inPI>f9n@p zM<1|x(h3Lvc(y2y`NR78N(5UP>nhb;Yj|_0%+CDf0bA64`PS3g!feTEfJ{=DgDzXI z?tU@PE<4z>D^#U zWr+&~CWLYsnlsXM_Br#}KlbbE9?#EL%AD-1dCCrDp}pA$R!e~eb;zDGQa9c6{RY$D zBUD31o1a{lQ)ge>KU=9_J7ZkHo6jq+L@ull*h-zZ(27=^+!e&ZAom>I=4~b=Eb9zz zQ6f0f*w-D8uf||C-?8nk}e8@vfpPln{ z7hsHw8gPxx#B~Lde1wfcY-n(SF}i6=PFQgw1vTG&(xkvHd8Wh>UtAac^1iNL<)ZlD zB-u>xBO#&0c8{>unWN`TH@Li`sVQ(v9xu8t|GtATOg>^I1UM2ZneKSA&S4Tu7$<}{ z*A|%McTo_Z7KWw0o@?#i|Jk*@Cx)hULVCp3aF~q=0s!;6MZ49p!^FS_2A*q6!doBh zwHW)$7gYNZ1qZk!vE5-1zw>|p&|X@yyhuw!;5PPH_MDwFpMQ0eOOo(AVHh-%r8JLg zh(L%IGPzS5Ig}E=jV~*x?Xn0w#puwMXw+hqVL^d6WG7(cWAD~c z+^OT~blO@_g=+;nfSCuuB7$JQLV=wc>dch-ylF1vO+#Idd_FY!d}N|9gj{F{F*N24 z9X*cQAM9NQ@u{In{y2i$Hnp!Nij0lp-1qj|Jv#N;ujG=Qrsl#m{|=Iup3xDmXf0hF zhVki1GOP+PTiiD?U&V0Vt7q`-Gr6g5$pfF8YNk#IOkH5&V=h@2-L$mh9FKb=!-Y4}dXJQ8o;v z;!9Se+yr1Iq(vEaf?#(D_JQDK5G;b=CSlOX@ZC2yJG+$FoltIwV1+@-rFm%MF;h5V{03a0D5+ql0Y3VzCM zXpEGe(krNOg#rG#Y$KVzXn-Im1b_9b_HT6Ux3(0M+gqIb>!joxADrl*epN0CW|3`# z8IKSM)#vZ})+y7S`@#I!eSd!GOD9aTkh?A|D?eVxHeMh!Kp2J#>+{D5;=aAB zU3=a<=PunPK4IS$2f5#;+<9As1c4&0QL}$LR9$N=Q-HHs5zxt>H8(r7! 
zEydDpEzaHBV*h+uQ>n3`;KDHXPBZ=&AQB{s-godGO|IvAb4&Kh`N>UZ&T=NGTNVVS zklr^Q&6e>9k(|gAeQeL!6Wr>r%`J6)Yo3cQn{WT{u~~5#)t7Q%_=@yiU(lF}YfQpe zRTq`uZ5-#WJo13(ZP%TbE9D%ys2-L+;gaLeDn*^$@n@VXT{VQm8OAEuv}nt*uidMT zx|}h9z2&E{s(=8#7>lFeeiJyGQKX;5t>p|GC9Ic1?3>_Oyb7{8vKZ zvEy0716n#im>T8kH{5&nEN6c-KY9ARo$chNLeZFDk5pv5fH?i$uUXjaAa^H3XF_f+_liDo(}nrCAtDNMuxz!9U;VLS z^!W1R@+d5QARhf#{MLd{4*Q6%{?xI}C!KdvQ_{J~nnho03mx950*Nw#fffr!d zkcRS15+~so_nTMv+%G;f$&_4yOBFTZ`cDG3y^YD;yB5D>A(f~&)#@JE^e;dOz@>wpHa*` z{%rD{C@h}l72O!`1aANr1VIZX_xLTJo-*~73+s~`{?Zd3b5YxqhT>krrjxjE}f&i(M7Zk$cNZ*aYASuxKSU}V4nNo)zjAii|j+Mc6dbAH=@zIA;^ zWCN@To$2Vb!NzVIwD(%mS#qYc*99G&Htg(3qUkNheffP2rJKJpan8&s`J1c_4oYh) zZWH<&3q&6Y!AEBMDX}>7a#f2V)VrFboUdb(1n=h zW@DQtn|NZAi5nY83I%8Ld1s5((!MUq9ky>2|L)6;6PhRHe`%eWEQMOejJ7>$kQ0Yo z?)|&XX>iY8+Z@|FTdcdM#Xb1r=F%+j(qEwYscc^pBp$jTXbQr*Rkxls)46MRDSq;p z9UMegq#1bQHQ8u;2g%$E3=qDJApeIK&uLtD^5HG+{CDr*e)qrA-9NuQ!>#=3OxOL} znaqYrwmzgFY zf(IUP_H~~uhO67+uY_UgtYrIA=@lX=>=_=v=iI_?{&vokWK##`rDw0TE8BYZ#n}h9 ztBoDo9|V944bB42nHgXf$QA6C%LnsYCYT+kHiuKEwU~+1Cz-}6%_f@M1aoEueCOKY z-Af+nIzmdLZ9i3c`hzs%DLirZw1R!~n&!ma-D2InE%vU!jE*e zlO|78yS6q%3?tEUe)h>XOmXh|`90tGuW6Fo{X(Aoc!Ch|%@76<1rU_2oM`|v9fB7@ zus;NcL-;C8FiUS3;`_!_#$UDE{A!(3fBt|sJ6%jN4gEID_U>-ovtc(>UToH5~|ZKd|T0GpxKzLHv3 z1muv+yt?$t8zVUwnxqp3Qn)o78avnatKp)57R8`j-I8oJB|KvwLCz*mtH`qXpY*8Y z&5R%nC?+<1&GgCnU)}iiT(IXn0}<}mx7y(C=M+sTj!z9td}Zpy>P_FJ6RCs{13f^g zqCc9dI^a2Rh`I0z;5`-38lV$64_KAHW29B=wBl{>h68rVHSfD1-+R?fU3HH?SG+R} zl0T(&2wqU@w+(1bge?Z=@Q9(T{W=E5D>Z5Td+O6?6+gNurvatseoN8~hv-9qz>oxn z#FAFtai-ufEjU*Ia$tIaPH^juadD8=mn$XVk&~M9zgc`)R~x(%{X&H3tHE_Z z7qGFPc7_Yq0el{V;RFVf;N9;yw1BfNxZn?6K=Ktg_(LhTHfSHVdt1{3#fvk5SEk>M zl*Xk3Wf}0l2yXSzm%*sSP7r)Q6gU!iDKHD6gAk#CR^Vv__wNB7Oxw+;?N&|b8Vb>i zQXY_42iBSC)`f>ca9CrbSvY-CFlSnencPsIE--|}5^-BQU8~pIHLdI2lZmx=C%FGi zX^t*kd|tj`|GkXGU@rVgY|gy6v^)sov$4r8*6|NuZ-#Eut^jTZehU0K9dsTj(id^{ zZ~sVVjfWd#LT3U`0Ubl^2b7yOmG6!W#MTBcn>sl<=;Xuds@4aDQZy5tp zzS4R4d?J1w>muNot@MtS!1oXVRid2e{keZ9a5^IVYQb*6!N7l|ZQTQ01^gD+glGkQ z@PDvG9|nfR0f`M?0pV%;zBqc_tM?BlAG)_;!5qW%Nrpl_nF}l-Op4UCZgP{JTIqKG z%fH;~Z(Cyd^xHxX+AnYvUGVwumfRPASlSqd@nhE7&pXFl@1135t5@lh^FBjhN8nw+ zJAlQ&1;BlXN>rq}v{su)3y0|YazG=(G=?H|L%{h}b6v$74J2Uzy>al#Qx7kg2@?YQ zh3h&nBxgJ4#=`k@Dq(VDs;-`UKT@U7#yWH(V;;$4tMw$@UIbU$5G_&#gA)84;0oY1 z>2qqXR-*%dEpotN>Gfye9N=yQ|9kP6%V3B;J@F>k1C#sGTaJw0blMw27VRAnHj^m!Z4{fbNXcCrcXES;QeuDoD>=$uszm%H zldXfS#RQaSjzd(U3xRJT(z?jd%5M)v(~v}hK)7^bgPjs*4uD`GFbB*OWUjlwJ`-Eo znALR@+z8AgqXl(XS9_n2ql*6^zN+gGGvY_-cbf@+AfgYWkR(o`ckI7M{^IjL z5;YyZf8elK*G*8`K)AV-7U`$g4eNlC9KsaGkgJ0tj(2^r#LsW*B?=Q0C)9a^@%!UT z6>Zfgc9qC3Ev%dx^q?I4QeT!7o7Ie;<2; z&(T*BTkkvn4EUD=J0p(2A|s2mdeY7;=W>@Fy(m2Pv=ajkdr6Mwi9v!593=y>oDM`1 zfHjU28VrTNec{TE=$VzpABKVbTe1x|V35u8o@DC09~PYsv{Tzp@qB{yWPYBZ%!A&$ z5{V9}xg^H0M2mK(FTpDI#M2FMfA9^A0{Cg-#(m=+w904R9y$Geq#UB3}7k@Ap5Z zzaN9B^E;%!{|;d;tqg+b1LsH}T=b&Z;bH$=YAJOPO`8nOje@{Q;^^#wj$Y{Kgtm4^ zTRW^-?^w0ov3fnM+2B~a!Le?W>sqnawfFYM7l%Q7Oq@`lTouRrdpQzv`)AtOwTMmG zMV8Eyc0MC;h1|O3v&&!I4jdOMk?MO28KAX(aC3h$n1C65w6s}oLqfq zTo~d)@ohNr$jLu)=AWTt3^s6|xV$rW$I|Yr!Z7)9l5AGRh6ASJQ(>rHc8h3FyVq!M zB*AAQzJ@azkpVx2)vskY6euEsNVU)5texhNfY9I(UjHD&@l}XJ?l}a<*C4`A=^#Y3 zuyw$bz^w=kv>}pyB|<-C$H`SAdTSjRlicy&=XHPB1k7=m!o)`Nq6q~v8%&fWF6ruZ z-MvM+N--T++QGJ^ldv}Ze=D#WY&(Y4fpMoKmY35v45Bv6GT<^qo&QgIwIf`r+F_8o zcmYEEn-E9HUl8Oirh|Wu%$^?!q%z{P!_1m#PCDx}>u@G$n&P0>nYatHu@ly>cdTi1 ztXgY%ZmnZwtK&bdj;B{y{wGBV%q*{N5l2PeHwe-}=dDJmfJ4hpM`?og!QA*mS@Q*c__% z@xTC&B6P4k{p_T#;L@V01qAn(BH`u|LW3&oHbXU{m&`%uHbLnUm)^c<6KvX4>2J$y zcm2OSn|(U@Y_#G@*TIxGBE)?a@ED@fKZmG4{T&3U-)ZJiFT(Y@5e}#E$&HL`sL2^a 
z?3`h@`RsVjVq+eB7`t+f>s_%nwrg7B4XqnkzoCOQo!xdV)~x`$63j}ltAKTZaeueA zs<&PlSgnITeZm3p(9T9O7VFaQHy{#X4Os{?sL`e(FljrDWLlTr8zXDcfgn&N3`8_) zx@e!9(|1iFOP*>albn!cUJgY3nn&ROtvIIYvc5k?=Icf2yL*6V5n|FgAC87IT)fgb zZc6)A4K^V7yMZk7RBa7QNdLPL#;}fzG4*+-KkvOE-8bw2W3b|f_kI^U)gz26z- zu5UyZx>~Y>LA;C)AslTvBDDv5!mZw;apWR`po$P9l~kh0^A>CQFgSb1J*Ay*|FcmR z_bg&sJOyq&qSR*$k$nWDpPkok2;6`q6-D%Q68U!jD-rwcP+%v@k*0_c{L-|& z#mJm$zPGPd?g-3BoIr1_7_YA9PDp0hdNQe@RbL(eev$UAQj=2Yo*+`_4Zxd${g7-& zKXlcFF#3Cd-yqJi9%^KikxgeE5{I5irZKf4p76gTOr;C)V1E?JT-${V?^gj=r}wLn z87*)%{_n$NvQS%p_^bF`O0P<@X;eW8{I~QaKvh$g10MtqM^v4mX2Deh-*)S-UmqZo zc7C6_fsY^#!bUR50q{9*3@!|?QHTpeTEl=tf`F;UkT>Njapm=}--dTBMu>0<<;+q4 zzYF4^srIb(`tM)=20n#2chrg5#N=M ze>yjv2yM(n7}tnw`QDm)wl)4Z4apDo?K;Rf(0!D9Lr3Badw+pP8Z=kG7jTJ%IDsS~ zNGv;BOS5x?t~`BJFIAm8)89%JpLrxE?lV4>miqyBAyEqJwdW(S{@=fLMkLwQh_6Et zdk%s~sv6c(r4`}*arCCo1qjmCdHvPT2=USU5+OY8kM%xXjqi^_nA=;^xhPN_S3^WA z)H&37+#Mm5gAfL?kW9<*O-yBqO$dV+%_xPhnf%B54zC{#!2ba!kZqz-2llW-s9Tfh zwe7tSDdSJJ{$4_M25msp$B}ZjZB6j^T33d)28w6v9MsyEokx&Qol`o8(-F0ICK<6s z-aR4Z{R+J{lwgk}(_HY{R&At~)H3wdSn>$s71B$`?Dywqz-eU6N&DpYv-1CX#Yiaj za^UspJ9J)rs2sD7VpuGt8?RXo&ir^zUl^iK)OPNF?fQ z2rejdQw?-T8pqZ=9YzPbApK0}8-rYp*mCWRY)EZeK$g9*Wo`IKq6F}1L^4-qYZ=5m zdK8fuAPja7L<0ML`8i_iHzG)K2f_$cYSdC4{R}+?wds2)SAo@s^G(E$1Zl=>12f*Hehk<3`}#wEw{7NYH?hh|xF_YHpS9e4Tw7IcpSKeESix+q9qR^K7=~p!@!HlOc4Ei3Q^I! zk#+Lz;#R~|=}zx0BAaziCzGyzf1ga-QYO_wCJYt;pP-yj6C{*)D)5DLKGY9UYtBHl zgck3;`h8E@uRNK${O|Pp!KP?8r+v&bwB(#MWTuF|$0BodFJgY3LUqOt8ROk0z21xX zBW?n&P21=ot0hshU!y_v)B5>s9a*;BP-3&gA>yH^aAN^1&OuD!my(4Vec|Tk*z4TPL!8m4A<1dqL!7}q>HSp& z>1YtW+K|F50WvRj z2r_H9uA3~`$&w{K>EAsy`r!}>wGwf*`UK}kWD+C|`T(!5c=uh0WR*RW_DMIL=vWH~ z;(i~AHfR{rBGHV45f#6V!Ku|s7&jo%qm^VrMu|bqjaCM)X{!HVsIW6409z>+KT;|{CpRrV1g!=_~9i@)=-I|DxV+OP>=RN-uoh%dS8t=lnCBT z#-Mx?;open(TXsR%6$-ytRCVGz?qc$e;gtKe@*89=@0b1_omORK$y)sGAXOXqkaOF zu7iQ0XpGvI79>irp~^kFcm5gR{lL3{lM&O#ZwG2K0G-d*A*RxMkvs|*MDG1agG!KU z6P2s--9S)}Ox{pnF2ZHo7+8%PE1>hGj+#m*PafI(-i*YKAEn%*>xb_u+w}h)3<6ii z!7HCpr`s#&8=3`4lz9q~^s8#TL#6jW5h9&K`F7Dch_7M|Ss1fcUjxL)@w@bWPau+1 zYtmJQ?ffwg1eNy<<{8y|YDIRGu{vaY7bBrYb>8hsc`}o)(J3gyiV=Lj0`UbsMS10; zQ6c)aetw(C(Bq9NK?d`TF770RQEVK-d$u(w^8O34MQ3~8>DU{QB93oIHe1|?gnyP% zzD;u|x#dFz)i7p}ZSOP{pXzv0r)@3G&LGTeLPgtAD5XpT2lB=;hJVHRY*tz)!elm3z3S{xL7B-zi1bvmXfV$u2=i9|)j;>E zb6dCIu0WXiWJHQDL~7w3NVyrMpuQhV89mwe%Ng&c@XNHR7d5Oj@c*rYY}Fmg)jzsE>8Qhfr0Y@d=pB;p%KFH)+14r zC21cHL_)1E^7=>mX%S8OWoe%`?=9GxM6aCnUx>76rhEY%qO$FVAZL!unNtlE(N_X* zLUOYw!R8u!ElAnm@hH*z1Lgc52EGKG=>6t9cl;Q>GJyS&%0ZW<&xT}9w2@Z#`%}4x z>+$ws4doGvfk5YBu(tYOAwh`mIRvi<6KYnQUGoHn;yX~^Opz=TGoQYD1>ys_jPe`{ zeIDf8hzSV8F^qzwj}Y~K2~sG)59R3p4?!F*1qQBeqYM5pGU+X3$p~|iT~wOYHD0BA zC1^Z+4HC+_1IbB0kZkje-UsCc5_K*=fH2grrGIxYa)hAmbbIfwgu4(&T7OZ3{vbwd z^!03=;-*7^5;A}5k!ZkR`?4j>ry2w3LUQx}4xEpq9Q!!0|DS}IU7O9K(U6Kogs5Lx z@x3C)!H9QqEkeKp)g9B2kwqq7Q&lUgjBYQW>S1U4_q9lvZXZP3IS|p7bo@TISKH1a zgnw81Y%e3f&tYBKW}b}c>gR(<_Mj5&P~WTG=k#qWW$5XYUb;#)+*pNepDt%ElhFQ>+))hoKGpJ&E9DB*8H2&;q; z*4w7TY7gISFQ z@Ey~Gt>mM>t3asbAne0rw>HtM9sUgPL&`I)ARFvqh&_HGVt;I6VCRc+JLPJl#r^x^ z?1M2Wh8yxxiQSDLe+M#88xRxfUSwz6iAaJ+rI*_wp|eS3rIbek9I~FRNUp$+^j)`I z$$S&*8T>{SKV&!3w)t2ArGYv`B5VHqV4?$xOOp^OUkzHRaHMX{t+fieq2dZ8{Zdma z6;u+PfgC0oBGO;eYzG2$VvWXFKnP9W>w|7H?a#xkn@EPUJ4+)RI$ymA@usy1S&4*_ z&O-LJ@lxP=_RjkqVuiW=vsV=K_<2&QFH^NtfKa{L; z^GC@Fhx8ZD)=;!XwjZm8;AiVOL?CF^;iwV468N)7K>#RUJVm|;@i2c02|MZbO^vZD z!uWfqb<)(iu{A;as|61#!y|blIYsl{D^q8+4%@W!8jQ2imvVj~4kcQR>-HcFU>%}j z-&do(ZiY_m8Y=VmzxyFW$f5;{5ozvk406bPoPsc%T4w|*^H#=akqspU@B6LooE=T) zukUF4DYDsScO>`zS4egvWSW#;&pSCNQ0ku;3KvVuR|75Gs}F*Ttf24uSb}} zVEU%J(b15M+^&3 
zdkv!*L{c4x5Vy9k2!0A;-(QYwnp#5^f1ZpSLG~`nkBL4S_y%$wp&@hFj%PJr_YNFF zR(rr_XbFNR??&e15oG_s`t+XZ$d+r}kW=Z~3Xr+I6T!iajB@V1g4&^*5c@qQL*&XZ zld30E$Tl7H2dXvr2_T9XzRI(uV30BV1hKTs{-i7Q~E#HyZALxGQ zHzM<-#IXZ%8tDfR!g&l~hLe!+=Rpgxi9?&6I6Of{geRq#DmMg!r{yJ}~;(YYACFhmRw6{s+jsm5`}>P@Yv)>4}DN zdsDyH-xS%FB7`v<2U@lM=Ma_i-3Wmxm)8EMeZObLv$}mUM3{l9$o;*m0U;i3+ed~S zkb(}C;LnvJ(t1SgZcG2~hwGFuZ$TmfP4v}7^lyJXnvhC2XHZ^3s(@$!a}mwP?}s0n znx0-h_$kKvz8qP6zJ)9aXgq^Dx35Da`2}R*Y(JzOdf!#^gZXr#^X8B!&OOLJjI~r$ z1sW}zUO^pFod|-RkL-27pK@Zilrw=!C!TK)I`4ktwX*`L!g3)}E@~rvW4vld+A3GA z{=2mQXQc0lyf*xa_k9n#$N{2qT|>r^{dZPs4l$BNw~nl^qauZGw=l9=dpp^&*68C>OnS%+k9Ds9yuOf4<$m>TFDn0mqO0j+h$WFO$ zBASZdhJycmt+^a3Xy2;g#YmYI3Bn*3Bj(sGl^QJar{}jm6)LsDwu&YV1M=-e+G;1qD$tpGJ~a?negQiV){I`c~*sXIrJZ-B59# z59)__7|AX88sea<>|bS@>yWLlw<1LGcCsy!%Iq`*gZJ(qAO!g##KEIG+5G3KL4pv< zwaEU5Wk^`BT1K2^G+u`6TYqnr_DscRJA$yS2=caj{np|g-%Q{A0faeJ+jLb6RsnxP z!hp-uzuV~RaBQw<*EbV~nk}dEqQyh2wX62{mV!eVq(Ro{qsaQc1=&VhU;EKMYte(4 zXE!4D{|ZEluGH#)ZMQi^Iksk)#mL5xTM(ju84|+S2~p`P>z>DC`N!+XLK9CQgKbUQ zZX+we=x;4mY56pQRE?C^J@2B%gr!hS?_GkZ`6nVwYZt_UrE096+_Ds5L{EGD{WMbK zU;>$T@+jrma%w949r1j>6!C)Zhp5jjWDHVGr?u%kEJ3{KYt#3wA}h>TIVP9Bvj;dE zVZtXNk%A^~Je>&RcrYE$V`A`{SadF zuO~aPwKDUpgh;h{WZQ04@;%MKTUjeXHMN`4=Nb`a=$j>vlVwG2snpkGD~+gJi}-0? zf|!mA$=10*hMz@bE>+|LpAZ`IkJM2mCu{>`Np*0J0i$8BOQZF+h{{f znU1urHnNgkGZ4F0@9!cbQY9Js5=FC(ddNx{7a4dDw@*wBl$#VPHGO2O)rMve z>VXW}=SnHoL3Y-jO7I3U2}L@NPWo1SRiw~;)%qSibfQ|aPFH&AX*7cgb5%n=eUCq$ z9+93e69Z0dtb{JBy;mhaQiwbu0PN)xu~OU4@bo=HCwtQx`lPCWl_y=O4*@WgrGZ`Pzd+%_v6 zEPgxvA+CR~>F3^R&(`Yqmd;77x!=57!n) UfcJV$oB#j-07*qoM6N<$f|{NpwEzGB literal 0 HcmV?d00001 diff --git a/images/url.png b/images/url.png new file mode 100644 index 0000000000000000000000000000000000000000..390feaa6421ada072486df10273e14e8216b7aff GIT binary patch literal 50689 zcmXt91yozl(@k)9D+H%laVzcxf@|^O?(Pr@MT@(8uoi29LUEVk4#kU8io1O8_nq_S zga>&EVRvU{XXf6!(dw%5*cjv(AP@)}svxTg0)cUW-*3@SfGZ_78Zqz#$wL~djRt%L zqFF`(&(U2K^gVz#PW|@{#!6DY0B*kWe5dEB&8#-TlEBx?aSC2FzI3_=sq@7 z`0hN*N^1ORJ^hK{yIDn3h{n!rq>eXY9!jEE)Ko^%=vnlzl(8o|s(k)r!FXYL&XDD& zs%AG8{K012GFdc>fw$nMO`^b(2Pwd^9CI(%w%Ezdo}4?px+?HLf;S!Ec`?%`wb9r_b$vY#x zGOci3k7c1l0#)ER`_$o5J~v73!VUah9`8Pe>In2K*mP=nV6xaoQJPp+eM+e!VLu8v zR7%zm+sY-D!u7Obx&R-QeC~I(+s0|ft;ciPe;4ez*YeLc2eF54Wl(X>WsDZRp>o6*zO zqL$;sbzOYLe{YtmA`?@~H2L4c z@(5czK%H&bNYi>K6>d6gC)3%4n-D%3XvR3*L_RU$ZA>KQyXzMD zBJaN%c(ubj0{ZBHKii8viTE5aQXMd(rGtV>aWUDC=)cnt8eS=8|Kw$NxHe02wb&=1 zENLcUQd*-0SI+7dk_hGYzPdb^x#69C)rBJjdwC5kIaZ;z8GLa%Sv1Hg#sVR!R9RLR zRf{quN+oN~BXhZ=y4-GNC(^&o|H|Hgjl>0E615QC*m%XL^79(@N$MIjI8K~1y+qU~ zfnn}XKI%5dT4bPREaK?Y%;GZrodHyQaj*!&Cp8H?^UJ-cJn+1ilO8e{SnN+>R1p?{ zs{}A4igj^ICc~VxMFRD)eHS6{C*(N$uHMVi_aPRV$M%a!W8CDZbRbC*4x`MGy^D3^ zNfGDYKh}lfFStNkttU>L9k<0AOxKxjtnM=Yp}E!f3ySXL9)ID!3-@@NEN-Qu2)1+4 zw3U>g4zs%!-GrwjFLra`@cnT$YOPT=rd0Nd`);-T-Ol9Kj=K+h=0Jp$!^rpV*&ua@ zq^|CQJBT4oG3aXS?RwjwXS~>jP~csL1tiyl&c(isV?t6CO3?8u?N23Txn7lwHuh2e zJma;Q2iwRMA4_e`SnDHHN^6i!`7bZ{%bdRdUY^hVo_otb1XGy^tQJC$-`!5e0-p3P zBJ8MD;&E|MT)?R6g{p9J2$f0@1p}mPN}vnhX@eeWvimMpv^WRL5O}=Pd4H7)x3Ha7 zK7%gfqas0OOpv&;HSe2k3WBZ+iW@4m4{xMghpzvwxO|kmu8V63qzSd4 z$&&u<_vd2R&He|52Jx>{Yb~Ta&B~Fv0#U}-+DEnyk=M|Ix)auwy0X{cyrJt7>Zc0s zf{73cP0^`cXB{^S7o!_SMG(9PlewuWF&_w+R=#)K`&?ae z0{yx>_c45t)428f$uHiC$LT3~>Zh&8y9?^^9d~NOX6IFfK14bGoEq#WWL%1hzEGKD z0Wheh^Z6eCF7UY;q^_&$lfa>7Y`p4H)(mI+a`tPrzPt;-YMdBI19~gd%12{>7ZP} z)#_qIN^?-t{>B(4XcDqssx2j@G7}n+{h^x&;}b&rVO2Gp-K693V|;wP7HBq#+KMO3 zil3kV=x1xI_4V~N4sP(oCT_Q;6=#8NANpo?AaunZA%Q_=tfA!nUqTH|H+IK2{z;sH zo0zD37k9DX!ltI{P_>Zz8DyCcsw_`&dw%k$Zk2)JAq#-N2mX$GyQbIh{cc_LS%)l= 
z)lE%VZ`zTKe%Qt9&gqIb&FQx@VAiK{wSs5r8MsQ$TbH%Absta9Tx4`Uxdf`G&I!?r zUw2E6-n^hc7k*#}Biu|;>xg*a`WY4xZM+}vwbRD~?~f&2$D+Y5w$W>|!6N4I-j!S%gyA4{a8226p{A<6tR?ZbXa~@a> z31`}R+=Zqvigmhf_AIddeQr)(#SugS@!ePdf~JBMtl^89;Vi08Vt02FA>#K3EYHk%lg^Pz zC{a^ZW)gDX{8z(_HDE$2bZ*Cs^*4IK6eE*GGA}+4*MHY#^(8dRG!NbaF|tbpYRPPR zF&uJ6hKSU$dQXei>(o7pA5`fC1=q<-38m9o$YUO-|*&ptSnq@@1LJTF$lyUpBf?sRI!- zc2Ge*WGO0XchPyQX{zm~oHW9paNK|&y)R5{ZWd4={K`f{74d9r?GUyO_V%ULi9d1957r|G#s zALu4?kR!s=UXNfEo(Ib=#Z>tU7b=?S47r_L!v0EYV(4M*hX=E+qzC zIk_QTA>U&{%%{`qXx>ME!6aF0>v?wrOQ3xE5w_h*6 zK{u7%q8x1Lzi2B@6)i!d4aw%So0bRC;jN2^?9b6ii|S1LY$86#w}l+5L@uKxTJIZc z%Eew=WQp`C&|j`yjc{}Fd=sHdyp#Va*7{>Gx?{}=JEE- zh0VCtGc31`mng`FLvoIKX$P)|zzvp6&Jl=r$~@1?j6U-zF9^ObqQ0Mu3~n*){x}C% zaUc{F2jqR5+P3qNgo$kaBeyZ3xsD23L<=^g_?ns;YyS3_vFkr-J?dLzQTEx|4$>N+ zKS0#BWqEwyT>d>d$#n!6VfFpHjM!(aodF6LPr$WQa*`6w!V~3H#ja+LAG&e8nMfLb za^nz^_3da#A*Q!i8g+?F7eCQ00#4;CEiXSf_qkq+TT=2^aC`s}SM+pu>6?v2T!UtG zYbH_F(4Ar#!I>y0pN_x3KY5tvjO!QZpBCyv>sFFE9vs3I_0fgA9MDF={cQR8@xj4e zX6izokjE}%_BDeUb>cuz@UqIHFmUX4gH9#(ru&C!|?% zudChBaN%^}pfuH;pcC6HVz#y|!`o>rBEbl44FMR~|K0flDZ;z-&X zm&AvLj*e%}x4&I`QBuy<}u! z^78Fqrd-W`JMQgx$oVi+0q?Ad)3?Q=Of81Cpa<_XH@^MlCd_8R>mtNEg`XMhhVO6A zM>*HS_k&<=+XKThqbkXlQbYsBz^dRP>N&C|dUX6tXE&sJ7}4#ZONS^U;LIT?Hp0GX zS!RGE0j(%+AjX$cc9zweq;OnQRmxqEaAE853^zZe%;uw7|7l|GsYK z%^B+m0!o!}eq+?IMIwREoyQHMdWdA*3E=k zWqhu>^yoHQtykTI@z4ygtgGzt?6ghuZs?@euAE#`Zp=(W`%zFPj`ADg(9b-{|5UHY#q*XY{ zp|<@xoZYa=sFaZ^_;#~XbA+3v?C-m`Nu67BAK$L|Zr`C3GK!81NsQ`AjdQkMPAjbk z9DQwm=7qDvFCyNtxIx#}NW2y+FT}mkUGb!EL0<@*El3XQe@GP#m!uDa6ZswHOMz64DBPTAUu;DN&WPAT`ZuI~(i zRM4@yv?N3*Iqyj@aZ!`QV>NOQkFil`H+c1<)xaIl)pI!xQv}wJOxFOpyZ))^h0dL&HZ~swc&S-|un2n>d z79(17BzzYrF1;x>QLVddZ$VkicBkXTK{>i%hUnVJ&ZwpFoM(`LVRgTD({aPT;1&7i zOmG8+7(6pOTUuidZ-OF6F%;rgF8prsSjT>k{!{U57#q-DUE9sTzh+57v)Uu#5wkVZH-IBkaW z&x+s0imRYa#TAFojqJGN<_6y%F#Q+P)mVO8Yu{CQ?0zweV2XN^ZcKiF$cU%($fQ`W zc;3DF^IsN1FjaD8ev|v-poLN8MmGJ(wyGf7j=>5-^#46LuKEf-D0+k;e7b6*C?IIn zvT7eRg@otURYd3(e7+USiIc@^Gw$P+b-iR$uukQ_U*u<;4R;oJpJdDO36;vX5NP5m z*T?rRI_LhRSq=TyF#PE!HF-?>ucw91pH5y?4;yK8%Z$o-qxc>-Xb_jtx^Ys!X4PC~ z0^`mo#MoKVIRBB%XkHe=66pU1ma{bcW<3^azvr8u5ocHabv(X7-rMyXm;O(S` zh&R)Qf_*DP8pDQJE4L$tW$_-Isg~U+j;=Dm>8F8#K|_m?vx{*gP>sIJd;=9Q7Evio z`mStf*tk&s?Hg~<3XroPpD}cX#2=0?27wIXqo^Ug)8GU>Up#r`{&cnQ0@J*2stK;T zyuJ?p*GH(a7~30X;ssOx9dooFH`+M#IzDMYz|`$MaE%nbp;xs2Z`<>ECZV@5WF+ z%FOW3N9Q^p8d8|b#!m?C!0-# zpy6JHxP9p@C31r`l&D#4r|5roINR!v%o46>Z*Lz^w>HXAiB~7o%aj~Msxl~hDtNgo z=miqodu|c!q5Y|n0#U#mKMOmrp;=`CoH0)#nn)Y!iYpiwh{?P`V%{fa#?8pfZNq9Yxn)tQQW%K!SV4gI(?!w!g0qhBTS1xw+(a$VC6mP6aEMU6 zMc~y3lJUDmprU#VZSmNvQa!TIjBIki+61Q=<3Mz1Lxv9hDf_EXJfV4R$MJ} z3Aqncs@4kUDLTJ?RZ%(u(`1ZRx;QPTGDc^alYeY%)cI@jwb&@=YEBn`?uNem=>z_K zap||0*R1oDw@a3NX%AYOl^}Lw%5{}KShPrJPsN*du4-nhIih+Pa|yoimTBMaQYW1s zRs|=lS|RA=?!(bn&X=dRIX<39_s=Qik(FUGRVD;(57Z~ddG zHLWjVS&+UNnb+Wa`^@CsvsTcQlWPLhDK?J3f1H&J;L!IyOlNd>7QjVwrgOx3kPmK} z0_5Ws#FzfHU5wBD*BMFxc%6$FV{M3iGJpo1xeZb_pQY)3Xd&Klpi`Pt!XOWpC~Ocx zlqZ0JicJY_DD)lQHe!p;VrC+70kx>G3uJpLn^jHx?oUz^3TbTr_tXLx{qvy@PA`GG zlWX%KC%VT=fv}Z@`p&@^uVT2-aac$$A_UT15CTE!JImHRL9neF9OiRYSioLYpW(8% znAtCR_B)m5FwqdPUH@35)*IaV^8A1s2v~))vC(3-LSrI7J0oc~w4w1{K0YRIEe1X}P5>b+lP$+6ER3YxFAFV-`iI$2sCYU@fqF7eJTq$`g2^zg=I3= z65a<=gCClTy(d4I6MaPN8i=F#Mr!2Rtv2@>Ok}oTc&QSl04mukQDEqF(dlmEP> zQNC8!RY|#(A8^uQ5^%HXqt@g#JCsCk;ty=Xov0$F`&}}~pPv!r=_c{qf%?W|WqSHN z?nfXf0c z^tzF9i9g1p( zqK z@3VdtUw6rgM*E0qa@!M>-mw+odu7lEj*p^9ZuGc`2E*9pq9&XZTo{Nu-_uU%zC$i~SQDN^3MPx+ z^NWYvT$(()GhB%J)?hYke>7wbeFqAs%>hFcHw2MV3DgN3wU^N(nI$V13GtHNnzDw| zB(J|yk0(XMH#AxoI2Fe=T^j5(6#;~0 zVY_5p4?Nlf8tlG(D!Dvk|0dJ_wK}7b;Ye<@BIU~)WJqw^Z^zh39B`v7cx^)A7=rQx 
zX19N4A@R#U!#!hCDP%?|io`*wU@3$MuDo!VDFWUrA~f3W_p0<@uISsqq>aiQ`pt&L zfSdxadHru<6H4X{*|6eecAQ^?YPWzl?FFy>{{5KqLkP~N5`}rY z1b@9-&2auv=d*JDNLsJcrURYoOydy<>VT9tXLa0;T*NThvHK@)nnxd4ka z3sHOAigL-v!*RtYy=5DjN!Tc`gpjKTYShr$*pe8?|4w0tA`>AAi$jeA@Tl7LS}wIJJabn zo-34KyKHJcSO51RgIouEj|~yv{Ydki7ybM((ty)phTu-%Z^32lQH0rWbfME#t2Zd)=2!An0zJEp4$w9{>&nES2@KLfE8i^!Z)&al{2 z1D&jOiiokip4m55G`{9Ty!;C{#9@WPH2Enzz;#3_Ckcu_JInv|mCtlqGj-4|whR3o zbBuHJ;E$tXUcxO|u}C|M|6ug;`5NN~ncJIO6^w^}oi9rX63@4z+*T)G9H?h-3aVRQNC~Jrr61G^fcw5-sK1{+||rVBvz9R;)5Dq~niPdk$&-M0aHY z%OZl+HSR|Y{Ph=_cc|e){<~g>VR$$_=q_Yl`vv782hS#N{{>m%{_+q| zRNG#le~BIpN<6#8ZbBG0Dr}EBgF{xZq>-%WT2`l(&Gr9gowMXe)~ovHioaG-Bn|h z-M!LN6>WciC_*?dX({(>p8wImIqgx4++JLuCM+<0ye)Y71==4 zd)y&O*Y?@|qLYRAyZ_OJYmWeFxoW^nG(NQ|5uVsIoWewXm$UvTckNSgH9h}?hzQZ) z&>^`Ap9e<{D@XI{I>KX|h6GrAEB>PQ8XFr2_;`5*mFlrk4GK}STltY(mHt}OOBu6V(D zGeq3R{QbXwSIYsO6k%YIJJ)0D4Phh}98kV*OvX~A?>QoxJe0Ld1_d=5+xCetLXh$(G3%4* zo3ssl9?E|kcBDZe1G*hlL|c?-(j?o3pK_{;x{c5c`H*>XFB6| zmMv5kgYjbf06P4eCU1RtAoMYN1qV@Hf<)qPLW}viPX(y>UI?HL38*87J*To{?ZrSa zAd|NOT)dL#goyqhEh+kVyZx@-3(-HV%@!*;on7F#f2O`EVV*JJ;@ITzU5QbC(x6j3 zqerGjHri^m#YkMD?BNf^lvqXPyf}%!&?cAhVOppY7N;1i%vVl`M-CV31yWh>qLPxM zURjutAOu?CO@(20ku91HhMWu}1l{HPO}g52j9+08%rQot>T9 z$M}Y9+|PSG=%)P^;8+$*D@&T3enJGj5vsX%)DDj(o!q*Tkhr$2v#R!zCa^)JB_$Tp z92pN)k8bAN^@=E1gfm=VxzwQ4x}bwF!LF>Z%`nQBetv`(YWaR!MLpa~U1(w7<|?T= zAr2K7ckW74Bh>?K$AJKzT7tz26Z)FevG1EvY#P1m4P)v;e3Qlu*cFUTZbmPz!Jz5 zfY*8jv_8??$=2D+>wjldQc@aisX6*U)Rw!88;Vj`#1r=nifALg7w*nj50{bmrc0sQ z{KF-!?j3_Nj&qksG!z047qP|V9fI$BiClt&V|xNs=%hGzsVvK8oz42 z7|N;1Z*9V2wgu@h`yr%HQ4=x@|8tC&qu8Fj6Wr=5Ay4U-BI4?)+w(g}Pfs5O=jsJF z8u?FCC=*fW?Qa<`eA@G4-Ze>MgDCAr z<5GbTQ7<5~$1hMms$k^Y<;^6_{?~jB;9`Rp=?U-e94(~%*dgH8AC@4~*>qqoTQa%V z+lgIB2Q_LRUM_V9Ai>g+?jcLc1VU}}g+&$+=vpl|I09Rgb(w#%5X_)`O^FoujPU8i zS6)BTDf}HO0gFm*bP(F)(D(1(ADL+XFe|cXZ*1f~TySf$M0tW4^7o4L*otdwaRC%h zo2}h%t30iB{oTcGfw_7UVWnw3Wg^dZq2@nz1lM|VQr`_Coo;#%2p<*fiB!5t!>4k` zX^BGp!EH)=Qu0>B85vvXce@q0fyoPP?p9Nmf^;9096{#Se#~0xrm5;`dhO$lh{;z9 z(PC!Z)rJgW%|Vc3$LAkqSUY@;SSghahIQC9VAsnTHYftgQ?ynu;^xslr)z~70!=%B za~^?|%TlEs5dg{>9W3J=e<-I5(2A|Ct&=M&4%))a7Ox^4pI(vkw45s%7RyLC(bmUv zv2!I8cj2Xb2x4KL>FY zZ}dL6AJyD~zO^kYV#?)%-_1S6JL0tV**EXuW{95wz1vJGf1xAdqEMHVQ#8qBV%j`O z?Wm2``43OR90g6ekUm)pFKGav6dx2SP-OYEE$9kCrOO?&P8ze`6nr+E<-x=^dnJiR zYwPGJYhhv0f*?f>syopkn>r`tvvhxUxH#Q&zWw(#aZ-wP?e0{h+hhr|J2`ZdC#0A@ zZ{uc%F|^Enl55ACPwko3wo0I<(9?+~JlsjGdGu$R<%0K^*lDH~4N!u7Z|AiZ^1pxx63K&z;L)I}+% zj9o$u*%obw^o#T@&-q>8{>1*#kxacF7A5>Veb_Q__;&QD#khK^ZHH+HuGsy+JX^q` zh>rFV-GnIVN~+gOChQJU2!4R!5~-pIPXFXwGP4!lZ64xFz&)yQPMuCv84}N_3u6$V z8!XdgnVp?2tgJ-$^78UW*%BlZI;duzO^_Mh!B;i2B+16CKxhmhbiHqW=r{|yt?py; zV}6ZKDJU7EWbV&kM$vk4*dwCD3M(180Yq0)%m%g#}5vFv5wbsOHR)%X<5-nc>BS1>Z&!_s{++6bWCk60yi;q1`k=g63 z)OIUH((9nY`1LQRU-cBku~kPg5j_ZuLq&(wD5+`$aiYsWNDN|RDuZHC$`|-@a9Xm6 zZNapWO2}w1ZewnCHaQ<3UpN~Bg0d}Dk?)FO_H&CQ?6VwX-xPO2u6ev z_VRE<*n#r!gy+MVsfxBRzymCzIgw3*2@8OvYGj z=Dsx)H0YM;Z94}C`iif~>*&hl%Sy+X8E^!;q+$o(SD{$mqtoQKTTvUp`?q_u_}PBFt!LWuP!7=M^#ei_ zDQ|=%4YpL7M6oTo(#SxCqe~)XnamG8)bxv5fV;j*gK(rN;)g^G>(c!qSt6nfH=W$4 z4>hBf>L3zSg6<%rCwS2~!N7{JjA!4`5Te+i_*AWo75FTjsud~YjO^QPC3eyZq)&oP z8u-oM9w}f*5R@jf*l(c`=rqd8l7noXrue9gbI${P!pepCLMO+^ZMk?``}q~mBCpU? 
zujx{n(rENYsjnqgdn4 z-$SWU@+JXlBAyQq8wvP6Uvxlhae;o*yCK6Ml^I`wC~?vAvDl5}&IlTUL(H6nDuA2?H5l zRx<3fZ8R?$@lLtu(|}u5tPoK|Y-PSBoz?I>5IITYI zVLjd^gd`ZEA1AVXL5Bp6y{c_@x0!uOE)1S#EeFN$-q6 zwg*}L_e9yq{!LOV_*;}qMPZv}Jt6`sO(o6o+fY36Uu0Kf*+aU^ACrg`gD%1e5VkB6 zDMK*PR4DR_n(iphKZJYXXz}A(V11INHJ^^wV7Pi4j`vw`%kh$g*{Jple+=e#@;0xu?sIh~ZIiQJ<)PuK$+v>xnE)}3E#!!RMTxnH4?5ta;07xr z6WTdAQ2g^-6Kvu21QiBM^z{jAOAh-ueY`mNJ7k?}8s29^lnVzdN^ZaiP=B9whiRgQ zm%_TV@nvewcEa>c8M-L()&b;e1LDd*9fmW*8q!EZfJ)ZNw@HZa2I|ou5vqX)qqPmV z`OnvfCA%1U{5qPD;2B9SGr+6sKN!*Jo&1Abg%p+7FRa{!k4n&bYd=Pq!%~cx_=9xZ zvD*LOJX4acW*pctgiSnQ_UkJof1s^?AJAJ~Hy4W=Bb2dpK|=*`&{Q6RvGH6FVOki{ zxu$dx5`R-%3*t^UMwL32=!oz((XbHYJ`VIIifv%-VA`7zx!EOA^dgY8iuMHJ2eC}l zrQO6mKoUCWYRSx&G%Cm3@B73Sf}kmuJS>jZO1Zr=NHk5`!K34#gjo{l$RM0EA@GaRS1Z?zXLf~;bM3y9 zVxp!Ls4u8}XmMlmMTlVT*)JZRRc=)kdtkf<9?<4c+OQS z!As7MvUu#24>=d76}W1h>dk_x)2;-k3WqBd?`0#kQ<*LU3*`)w13e8;kKr1#Q+ai1+s}luq(Ks(RAuptwZf8HQuf5q!Iv@aGk0AcA zG*JxX$IslCO||m=7l=cA@W6{c=$$p13Nps9WUlANZE3fG7*cp0h+UA3nOT%tT@Z{I z)(evJ1Mz_NKor3k4ZUzFVoKOhfS+@WaEnshOf-mulB}hs8X24Rz){S1HJG{eA59*@ zP?uPyMh3Fq{WEpm)#d@|8uGV)wCL&;eqIgq#H1M-$0`SDOgp()NWS|u>I5joT6)ZG zbIx$4klg4ZUX87-z z827aqeiO!5#&Gs7v#a;ddR_EEH*7(8YjwVq%mJk! zA8{rvQEsUuRG3RlvwJ8K@eu9w6dX>7nw!`KIeW`RElMU|_ehOu0m&Z>O|t2(q}VoT zV|~OIo~)4DT8ui@6M+8R2-0A7tE0ipc*Y!M(}Z7zPlYY>DM_3iQIs3H|Hb;&i$dTA zN-A~ydW;6EfX7rOJ`2d&h*!X)Afyhgw!sd(W}bTuS!}|lmoebt#bPfEg9TO*!~kOjk#cc|pH4Ly}AF()rw6+z#}f;_S`$<99spRrGXm z4GlVfkrE5eVpI^wM<+82saKW?zkYvl5U*l&H0^jS&$05Iso&hKc!ZC+3_p^Y7Acm5 zy$3knK%dtqB=6$F3Cv|`W=3n1g3RaC79wm;2r)oSG`(h;rr|>#-{i$gmb-lxeND3g z!hWOz;W1OtpOR8vI}ve#X19T=VqodWNbu^RkbH`zITs%5V@wLjpM~eY=!XIAS15Nx z61}|nuT1Vt9xIILgYP6{o?7QSL98pV;!zEAq*n*ur^vNyc8aq3G3IaIN`Tdmm-lKz zZfnC-pDvqqi~I@cqccI_U07&FcTPrBcZ;4U>Q!F(jBfFa#KhY7y#&gidr=t9I{=YO zk8n@>o?n(`rDx*s3TNcrSPtp(K{(#!Uh~G zxqQqjVSeviOQq~Ea@7U)v9-Fp>OP2?WBGcmy!7EwtRA>!nTechGM7w3>Irgrn%i zZKA~kd)-#ruwrg#0Zhcs&R3;osRh>3R>d)t6+_MNV7#!0!^Gf?pZ3VF&{QqF2X-B6J23Z__k`%+X>kR@-aggK6{t zNlc{UIUhRwYbveiqw@DcvWv3L&%N_f@W9q~=%!C!YSPUU7|V&$z2*0Z1_?w-TX;o$ zFW^Q+B_)VJZ$+$FDLyB(mw87^+PGvuP1eVz&V)V5C-?>?_$txVK&WqFDN1i#_c!r# zDd$H?ZrQHwQ#4d7*dXk95tDu|{RkerFgC}B+gZ1p|bu!G+U2le**r^ zCSk#?EV6oqX-`W6(&tE}QWQHnyroa5R<7!%vMK9DAfJZ_v6p?$8?n2#+S+=7QUWC9 zn~Yy{f7YY;TCmcV_B~bpd;>?tH)>mTWOm8ytTt6U$!Hq3vj2;(L&)RR!WoPk>5D6s zg#d@2%zlgrDD%o18X8WmtQ@^nM=jR$rWPRl*U z(fhWtGs<9m_J>s(ZQK25bZuMK85cHMU4|Y7nNhc> z`4F4dS}!-Zo9};%-jDftd8rmxgGS)7taDsADKc{Dg053L(H0DIT=>SpTKev-U)eBV zz!@AlFQj+Jb9dNqbY;5QFEA$R)$rDPX%I$7*4DRJKEcs2K|A&AvoYecF~nPXR>-de zbG}EtWZ$;)jSJh3gx*fmgQRi?{fKzt{%9-+0A>CF1hO(I%BbS*)R}<{A35P}}jMXtLD-y+In8CS3& zR`97I$wc$&7nbVwN@~x@js2sx&5ZPqWI{%~D|PeH=Qm9zf~t@zynX2zVIcVP8iZ!? z<-HX-9GCI$w*G#u)0%0k*t6LNOu+DMDTp|V5?KkLWDkQ3!o7ETLNHPtly$~YN z{f~)GuDcVRZoUX1ea?WH>6f{HL5y<4YVwZ`; zd}J2Py8%6AI3+Z&Q)=9oYCAJ+29^H$R7B4vXYmm)Y;`Iz>5Jse#yt?c*#(M7LXGdrz^w{NC2DN%dIQ zb^~iRbMl1Y#LUWDQ9{?aHCDa{Kl%U46KO_Jo~Gua(qn#&@PI@dFneI}Wc-v@yaCt8LlsHGNr!$^=XavO1JIH8BXJ775UPoW z0@|A|=5jTl#FM{AiS8fKT@lE&Q)Dsm2x5~j-W#zLxZxpuF}mDZ{BL3(ZyyF3gbClF zV7~%cx=w|nYQ(3+{0((xRl=kKNuq7KeJ<&gES3=}E**7)*Qt8i3DeZyy(I|!)Fu=^ zQ-X+tpMFCxAK#bCqIG(IR4f06g=H?vY)sHx7FF`_iH`|cdYVQR#q>1{E(IkowBmgY zt1Fa$C5YO$C0*bMQ}L$qZ`*i;?ZIh%3(kgI+88gdyV|w zrsRWXp&}vh@z&KC5cAp`zN?mrAe=Y4HL^A~<=0Q~m63C_DOZvTaS}_hv#?SJDO-|X z>@fJ@#~V!q-cN#(uvZ(9Tmn24S3E8`5Dwn%H!QO%R}>&6QzcRgPyK5{f&t*cmhaW! 
GlusterFS Documentation

+

GlusterFS is a scalable network filesystem suitable for data-intensive tasks such as cloud storage and media streaming. GlusterFS is free and open source software and can utilize common off-the-shelf hardware. To learn more, please see the Gluster project home page.

+

Get Started: Quick Start/Installation Guide

+

Since Gluster can be used in different ways and for different tasks, it would be difficult to explain everything at once. We recommend that you follow the Quick Start Guide first. By utilizing a number of virtual machines, you will create a functional test setup to learn the basic concepts. You will then be much better equipped to read the more detailed Install Guide.

  • Quick Start Guide - Start here if you are new to Gluster!
  • Installation Guides - describe the prerequisites and provide step-by-step instructions to install GlusterFS on various operating systems.
  • Presentations related to Gluster from conferences and summits.
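To give a flavour of what the Quick Start Guide covers, a heavily condensed sketch of a three-node replicated test setup is shown below. It is not a substitute for the guide itself: the hostnames (server1-server3), brick paths and volume name are placeholders, and the brick filesystems are assumed to be formatted and mounted already.

    # from server1, add the other test machines to the trusted pool
    gluster peer probe server2
    gluster peer probe server3

    # create and start a 3-way replicated volume (brick paths are placeholders)
    gluster volume create gv0 replica 3 \
        server1:/data/brick1/gv0 server2:/data/brick1/gv0 server3:/data/brick1/gv0
    gluster volume start gv0

    # mount the volume from a client and try writing a file
    mount -t glusterfs server1:/gv0 /mnt
    echo hello > /mnt/hello.txt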

More Documentation

  • Administration Guide - describes the configuration and management of GlusterFS.
  • GlusterFS Developer Guide - describes how you can contribute to this open source project, built through the efforts of its dedicated, passionate community.
  • Upgrade Guide - if you need to upgrade from an older version of GlusterFS.
  • Release Notes - the GlusterFS Release Notes provide high-level insight into the improvements and additions that have been implemented in the various GlusterFS releases.
  • GlusterFS Tools - Guides for GlusterFS tools.
  • Troubleshooting Guide - Guide for troubleshooting.

How to Contribute?

+

The Gluster documentation has its home on GitHub, and the easiest way to contribute is to use the "Edit on GitHub" link on the top right corner of each page. If you already have a GitHub account, you can simply edit the document in your browser, use the preview tab, and submit your changes for review in a pull request.
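For larger changes it can be more convenient to work on a local clone of the documentation sources instead of the in-browser editor. A minimal sketch of that workflow follows; the fork URL and branch name are placeholders, and running MkDocs locally is optional.

    # clone your fork of the documentation sources (URL is a placeholder for your own fork)
    git clone https://github.com/<your-username>/glusterdocs.git
    cd glusterdocs
    git checkout -b docs-fix

    # edit the relevant .md file, then preview locally if MkDocs is installed
    mkdocs serve

    # commit and push, then open a pull request on GitHub
    git commit -am "Describe your documentation change"
    git push origin docs-fix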

+

If you want to help more with Gluster documentation, please subscribe to the Gluster Users and Gluster Developers mailing lists, and share your ideas with the Gluster developer community.

\ No newline at end of file
diff --git a/js/custom-features.js b/js/custom-features.js
new file mode 100644
index 00000000..a76a7228
--- /dev/null
+++ b/js/custom-features.js
@@ -0,0 +1,40 @@
+// Add ability to copy the current URL using vim-like shortcuts
+// There already exist navigation-related shortcuts like
+// F/S -- for searching
+// P/N -- for navigating to previous/next pages
+// This patch just extends those features
+
+// Expose the internal notification API of MkDocs.
+// This API isn't exposed publicly; it is used internally to show
+// notifications when the user copies a code block.
+// It was reverse engineered for ease of use; it takes a string arg `msg`.
+const notifyDOM = (msg) => {
+  if (typeof alert$ === "undefined") {
+    console.error("Clipboard notification API not available");
+    return;
+  }
+
+  alert$.next(msg);
+};
+
+// Extend the keyboard shortcut features
+keyboard$.subscribe((key) => {
+  // We want to allow the user to be able to type our modifiers in search;
+  // disallowing that would be hilarious
+  if (key.mode === "search") {
+    return;
+  }
+
+  const keyPressed = key.type.toLowerCase();
+
+  // Y is added to honor vim enthusiasts (yank)
+  if (keyPressed === "c" || keyPressed === "y") {
+    const currLocation = window.location.href;
+    if (currLocation) {
+      navigator.clipboard
+        .writeText(currLocation)
+        .then(() => notifyDOM("Address copied to clipboard"))
+        .catch((e) => console.error(e));
+    }
+  }
+});
diff --git a/presentations/index.html b/presentations/index.html
new file mode 100644
index 00000000..8aef8057
--- /dev/null
+++ b/presentations/index.html
@@ -0,0 +1,5120 @@

Presentations

+

This is a collection of Gluster presentations from all over the world. We have a slideshare account where most of these presentations are stored.

+

FOSDEM 2020 @ Brussels, Belgium - 1st & 2nd February 2020

+ +

FOSDEM 2017 @ Brussels, Belgium - February 5, 2017

+ +

PyCon India 2016 @ New Delhi, India - September 25, 2016

+ +

Openshift Meetup India 2016 @Bangalore, India - June 11, 2016

+ +

GlusterFS Meetup Bangalore 2016 @ Bangalore, India - June 4, 2016

+ +

OpenStack Days Istanbul 2016 @ Istanbul, Turkey - May 31, 2016

+ +

NLUUG Voorjaarsconferentie 2016 @ Bunnik, The Netherlands - May 26, 2016

+ +

Vault 2016 @ Raleigh, NC, US - Apr 20-21, 2016

+ +

Incontro DevOps Italia 2016 @ Bologna, Italy - Apr 1, 2016

+ +

LinuxConfAU 2016 @ Geelong, Australia - Feb 03, 2016

+
    +
  • GlusterD thread synchronization using Userspace Read Copy Update (URCU) + slideshare - Atin Mukherjee
  • +
+

DevConf.CZ 2016 @ Brno, Czech Republic - February 5, 2016

+
    +
  • Ceph, Gluster, Swift: Similarities and differences (https://speakerdeck.com/prashanthpai/ceph-gluster-swift-similarities-and-differences) - Prashanth Pai, Thiago da Silva
  • +
+

FOSDEM 2016 @ Brussels, Belgium - January 30, 2016

+
    +
  • Gluster roadmap: Recent improvements and upcoming features + slideshare - Niels de Vos
  • +
+

T-DOSE 2015 @ Eindhoven, The Netherlands - Nov 28, 2015

+
    +
  • Introduction into Scale-out Storage with Gluster + slideshare - Niels de Vos
  • +
+

Usenix LISA 2015 @ Washington DC, USA - Nov 8, 2015

+ +

Open Source Backup Conference @ Cologne, Germany - September 30, 2015

+
    +
  • Scale-Out backups with Bareos and Gluster + (slideshare) - Niels de Vos
  • +
+

2015 Storage Developer Conference

+
    +
  • +

    Achieving Coherent and Aggressive Client Caching in Gluster, a Distributed System + pdf - Poornima Gurusiddaiah, Soumya Koduri

    +
  • +
  • +

    Introduction to Highly Available NFS Server on Scale-Out Storage Systems Based on GlusterFS + slideshare - Soumya Koduri, Meghana Madhusudhan

    +
  • +
+

Gluster Summit 2015 @ Barcelona, Spain

+ +

Gluster Conference @ NMAMIT, Nitte - Apr 11, 2015

+ +

Ceph & Gluster FS - Software Defined Storage Meetup - Jan 22, 2015

+ +

Open source storage for bigdata :Fifth Elephant event - Jun 21, 2014

+ +

Red Hat Summit 2014, San Francisco, California, USA - Apr 14-17, 2014

+ +

Gluster Community Night, Amsterdam, The Netherlands - Mar 4th, 2014

+ +

Gluster Community Day, London, United Kingdom - Oct 29th, 2013

+ +

Gluster Community Day / LinuxCon Europe 2013, Edinburgh, United Kingdom - Oct 22-24, 2013

+ +

Gluster Community Day, Stockholm, Sweden - Sep 4th, 2013

+ +

LOADays, Belgium - April 6th, 2013

+ +

CIALUG Des Moines, IA - March 21st, 2013

+
    +
  • Converged infrastructure with oVirt, KVM, and Gluster - Theron Conrey, Red Hat
  • +
+

Gluster Community Summit, Bangalore - March 7 & 8, 2013

+ +

Gluster Community Workshop at CERN in Geneva - February 26, 2013

+ +

Gluster Community Workshop at LinuxCon Europe - November 8, 2012

+ +

Software Developers' Conference (SNIA) - Sep 17, 2012

+
    +
  • Challenges and Futures + slideshare + (Jeff Darcy, Red Hat)
  • +
+

Gluster Workshop at LinuxCon North America - Aug 28, 2012

+
    +
  • Translator tutorial + slideshare + (Jeff Darcy, Red Hat)
  • +
  • Translator example + slideshare + (Jeff Darcy, Red Hat)
  • +
\ No newline at end of file
diff --git a/release-notes/10.0/index.html b/release-notes/10.0/index.html
new file mode 100644
index 00000000..0486a28c
--- /dev/null
+++ b/release-notes/10.0/index.html
@@ -0,0 +1,4724 @@

Release notes for Gluster 10.0

+

Release date: 16-Nov-2021

+

This is a major release that includes a range of features, code improvements and stability fixes as noted below.

+

A selection of the key features and changes are documented in this page. A full list of bugs that have been addressed is included further below.

+ +

Announcements

+
  1. The release that receives maintenance updates post release 10 is 9 (reference)
  2. Release 10 will receive maintenance updates around the 15th of every alternate month, and release 9 will receive maintenance updates around the 15th every three months.

Builds are available at -

+

https://download.gluster.org/pub/gluster/glusterfs/10/10.0/

+

Highlights

+
  • Major performance improvement of ~20% w.r.t. small-file as well as large-file testing in controlled lab environments #2771

NOTE: The above improvement requires the tcmalloc library to be enabled at build time. We have tested and verified tcmalloc on x86_64 platforms, and it is enabled only for x86_64 builds in the current release (a minimal build sketch follows this list).

  • Randomized port selection for bricks, improves startup time #786
  • Performance improvement with use of readdir instead of readdirp in fix-layout #2241
  • Heal time improvement with bigger window size #2067
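For the tcmalloc note above, a minimal source-build sketch is given below. It is only an illustration: the --enable-tcmalloc flag name is an assumption and should be confirmed with ./configure --help in your source tree, and a tcmalloc implementation (for example from gperftools) must already be installed on the build host.

    # hypothetical source build on an x86_64 host with tcmalloc (gperftools) available
    ./autogen.sh
    ./configure --enable-tcmalloc   # flag name is an assumption; confirm with ./configure --help
    make -j"$(nproc)"
    sudo make install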

Bugs addressed

+

Bugs addressed since release-9 are listed below.

+
    +
  • #504 AFR: remove memcpy() + ntoh32() pattern
  • +
  • #705 gf_backtrace_save inefficiencies
  • +
  • #782 Do not explicitly call strerror(errnum) when logging
  • +
  • #786 glusterd-pmap binds to 10K ports on startup (using IPv4)
  • +
  • #904 [bug:1649037] Translators allocate too much memory in their xlator_
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #1002 [bug:1679998] GlusterFS can be improved
  • +
  • #1052 [bug:1693692] Increase code coverage from regression tests
  • +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
  • #1096 [bug:1622665] clang-scan report: glusterfs issues
  • +
  • #1101 [bug:1813029] volume brick fails to come online because other proce
  • +
  • #1251 performance: improve __afr_fd_ctx_get() function
  • +
  • #1339 Rebalance status is not shown correctly after node reboot
  • +
  • #1358 features/shard: wrong "inode->ref" leading to ASSERT in inode_unref
  • +
  • #1359 Cleanup --disable-mempool
  • +
  • #1380 fd_unref() optimization - do an atomic decrement outside the lock a
  • +
  • #1384 mount glusterfs volume, files larger than 64Mb only show 64Mb
  • +
  • #1406 shared storage volume fails to mount in ipv6 environment
  • +
  • #1415 Removing problematic language in geo-replication
  • +
  • #1423 shard_make_block_abspath() should be called with a string of of the
  • +
  • #1536 Improve dict_reset() efficiency
  • +
  • #1545 fuse_invalidate_entry() - too many repetitive calls to uuid_utoa()
  • +
  • #1583 Rework stats structure (xl->stats.total.metrics[fop_idx] and friend
  • +
  • #1584 MAINTAINERS file needs to be revisited and updated
  • +
  • #1596 'this' NULL check relies on 'THIS' not being NULL
  • +
  • #1600 Save and re-use MYUUID
  • +
  • #1678 Improve gf_error_to_errno() and gf_errno_to_error() positive flow
  • +
  • #1695 Rebalance has a redundant lookup operation
  • +
  • #1702 Move GF_CLIENT_PID_GSYNCD check to start of the function.
  • +
  • #1703 Remove trivial check for GF_XATTR_SHARD_FILE_SIZE before calling sh
  • +
  • #1707 PL_LOCAL_GET_REQUESTS access the dictionary twice for the same info
  • +
  • #1717 glusterd: sequence of rebalance and replace/reset-brick presents re
  • +
  • #1723 DHT: further investigation for treating an ongoing mknod's linkto file
  • +
  • #1749 brick-process: call 'notify()' and 'fini()' of brick xlators in a p
  • +
  • #1755 Reduce calls to 'THIS' in fd_destroy() and others, where 'THIS' is
  • +
  • #1761 CONTRIBUTING.md regression can only be run by maintainers
  • +
  • #1764 Slow write on ZFS bricks after healing millions of files due to add
  • +
  • #1772 build: add LTO as a configure option
  • +
  • #1773 DHT/Rebalance - Remove unused variable dht_migrate_file
  • +
  • #1779 Add-brick command should check hostnames with bricks present in vol
  • +
  • #1825 Latency in io-stats should be in nanoseconds resolution, not micros
  • +
  • #1872 Question: How to check heal info without glusterd management layer
  • +
  • #1885 __posix_writev() - reduce memory copies and unneeded zeroing
  • +
  • #1888 GD_OP_VERSION needs to be updated for release-10
  • +
  • #1898 schedule_georep.py resulting in failure when used with python3
  • +
  • #1909 core: Avoid several dict OR key is NULL message in brick logs
  • +
  • #1925 dht_pt_getxattr does not seem to handle virtual xattrs.
  • +
  • #1935 logging to syslog instead of any glusterfs logs
  • +
  • #1943 glusterd-volgen: Add functionality to accept any custom xlator
  • +
  • #1952 posix-aio: implement GF_FOP_FSYNC
  • +
  • #1959 Broken links in the 2 replicas split-brain-issue - [Bug]Enhancemen
  • +
  • #1960 Add missing LOCK_DESTROY() calls
  • +
  • #1966 Can't print trace details due to memory allocation issues
  • +
  • #1977 Inconsistent locking in presence of disconnects
  • +
  • #1978 test case ./tests/bugs/core/bug-1432542-mpx-restart-crash.t is gett
  • +
  • #1981 Reduce posix_fdstat() calls in IO paths
  • +
  • #1991 mdcache: bug causes getxattr() to report ENODATA when fetching samb
  • +
  • #1992 dht: var decommission_subvols_cnt becomes invalid when config is up
  • +
  • #1996 Analyze if spinlocks have any benefit and remove them if not
  • +
  • #2001 Error handling in /usr/sbin/gluster-eventsapi produces AttributeErr
  • +
  • #2005 ./tests/bugs/replicate/bug-921231.t is continuously failing
  • +
  • #2013 dict_t hash-calculation can be removed when hash_size=1
  • +
  • #2024 Remove gfs_id variable or at least set to appropriate value
  • +
  • #2025 list_del() should not set prev and next
  • +
  • #2033 tests/bugs/nfs/bug-1053579.t fails on CentOS 8
  • +
  • #2038 shard_unlink() fails due to no space to create marker file
  • +
  • #2039 Do not allow POSIX IO backend switch when the volume is running
  • +
  • #2042 mount ipv6 gluster volume with serveral backup-volfile-servers,use
  • +
  • #2052 Revert the commit 50e953e2450b5183988c12e87bdfbc997e0ad8a8
  • +
  • #2054 cleanup call_stub_t from unused variables
  • +
  • #2063 Provide autoconf option to enable/disable storage.linux-io_uring du
  • +
  • #2067 Change self-heal-window-size to 1MB by default
  • +
  • #2075 Annotate synctasks with valgrind API if --enable-valgrind[=memcheck
  • +
  • #2080 Glustereventsd default port
  • +
  • #2083 GD_MSG_DICT_GET_FAILED should not include 'errno' but 'ret'
  • +
  • #2086 Move tests/00-geo-rep/00-georep-verify-non-root-setup.t to tests/00
  • +
  • #2096 iobuf_arena structure doesn't need passive and active iobufs, but l
  • +
  • #2099 'force' option does not work in the replicated volume snapshot crea
  • +
  • #2101 Move 00-georep-verify-non-root-setup.t back to tests/00-geo-rep/
  • +
  • #2107 mount crashes when setfattr -n distribute.fix.layout -v "yes" is ex
  • +
  • #2116 enable quota for multiple volumes take more time
  • +
  • #2117 Concurrent quota enable causes glusterd deadlock
  • +
  • #2123 Implement an I/O framework
  • +
  • #2129 CID 1445996 Null pointer dereferences (FORWARD_NULL) /xlators/mgmt/
  • +
  • #2130 stack.h/c: remove unused variable and reorder struct
  • +
  • #2133 Changelog History Crawl failed after resuming stopped geo-replicati
  • +
  • #2134 Fix spurious failures caused by change in profile info duration to
  • +
  • #2138 glfs_write() dumps a core file file when buffer size is 1GB
  • +
  • #2154 "Operation not supported" doing a chmod on a symlink
  • +
  • #2159 Remove unused component tests
  • +
  • #2161 Crash caused by memory corruption
  • +
  • #2169 Stack overflow when parallel-readdir is enabled
  • +
  • #2180 CID 1446716: Memory - illegal accesses (USE_AFTER_FREE) /xlators/mg
  • +
  • #2187 [Input/output error] IO failure while performing shrink operation w
  • +
  • #2190 Move a test case tests/basic/glusterd-restart-shd-mux.t to flaky
  • +
  • #2192 4+1 arbiter setup is broken
  • +
  • #2198 There are blocked inodelks for a long time
  • +
  • #2216 Fix coverity issues
  • +
  • #2232 "Invalid argument" when reading a directory with gfapi
  • +
  • #2234 Segmentation fault in directory quota daemon for replicated volume
  • +
  • #2239 rebalance crashes in dht on master
  • +
  • #2241 Using readdir instead of readdirp for fix-layout increases performa
  • +
  • #2253 Disable lookup-optimize by default in the virt group
  • +
  • #2258 Provide option to disable fsync in data migration
  • +
  • #2260 failed to list quota info after setting limit-usage
  • +
  • #2268 dht_layout_unref() only uses 'this' to check that 'this->private' i
  • +
  • #2278 nfs-ganesha does not start due to shared storage not ready, but ret
  • +
  • #2287 runner infrastructure fails to provide platfrom independent error c
  • +
  • #2294 dict.c: remove some strlen() calls if using DICT_LIST_IMP
  • +
  • #2308 Developer sessions for glusterfs
  • +
  • #2313 Long setting names mess up the columns and break parsing
  • +
  • #2317 Rebalance doesn't migrate some sparse files
  • +
  • #2328 "gluster volume set group samba" needs to include write-b
  • +
  • #2330 gf_msg can cause relock deadlock
  • +
  • #2334 posix_handle_soft() is doing an unnecessary stat
  • +
  • #2337 memory leak observed in lock fop
  • +
  • #2348 Gluster's test suite on RHEL 8 runs slower than on RHEL 7
  • +
  • #2351 glusterd: After upgrade on release 9.1 glusterd protocol is broken
  • +
  • #2353 Permission issue after upgrading to Gluster v9.1
  • +
  • #2360 extras: postscript fails on logrotation of snapd logs
  • +
  • #2364 After the service is restarted, a large number of handles are not r
  • +
  • #2370 glusterd: Issues with custom xlator changes
  • +
  • #2378 Remove sys_fstatat() from posix_handle_unset_gfid() function - not
  • +
  • #2380 Remove sys_lstat() from posix_acl_xattr_set() - not needed
  • +
  • #2388 Geo-replication gets delayed when there are many renames on primary
  • +
  • #2394 Spurious failure in tests/basic/fencing/afr-lock-heal-basic.t
  • +
  • #2398 Bitrot and scrub process showed like unknown in the gluster volume
  • +
  • #2404 Spurious failure of tests/bugs/ec/bug-1236065.t
  • +
  • #2407 configure glitch with CC=clang
  • +
  • #2410 dict_xxx_sizen variant compilation should fail on passing a variabl
  • +
  • #2414 Prefer mallinfo2() to mallinfo() if available
  • +
  • #2421 rsync should not try to sync internal xattrs.
  • +
  • #2429 Use file timestamps with nanosecond precision
  • +
  • #2431 Drop --disable-syslog configuration option
  • +
  • #2440 Geo-replication not working on Ubuntu 21.04
  • +
  • #2443 Core dumps on Gluster 9 - 3 replicas
  • +
  • #2446 client_add_lock_for_recovery() - new_client_lock() should be called
  • +
  • #2467 failed to open /proc/0/status: No such file or directory
  • +
  • #2470 sharding: [inode.c:1255:__inode_unlink] 0-inode: dentry not found
  • +
  • #2480 Brick going offline on another host as well as the host which reboo
  • +
  • #2502 xlator/features/locks/src/common.c has code duplication
  • +
  • #2507 Use appropriate msgid in gf_msg()
  • +
  • #2515 Unable to mount the gluster volume using fuse unless iptables is fl
  • +
  • #2522 ganesha_ha (extras/ganesha/ocf): ganesha_grace RA fails in start()
  • +
  • #2540 delay-gen doesn't work correctly for delays longer than 2 seconds
  • +
  • #2551 Sometimes the lock notification feature doesn't work
  • +
  • #2581 With strict-locks enabled clients which are holding posix locks sti
  • +
  • #2590 trusted.io-stats-dump extended attribute usage description error
  • +
  • #2611 Granular entry self-heal is taking more time than full entry self h
  • +
  • #2617 High CPU utilization of thread glfs_fusenoti and huge delays in som
  • +
  • #2620 Granular entry heal purging of index name trigger two lookups in th
  • +
  • #2625 auth.allow value is corrupted after add-brick operation
  • +
  • #2626 entry self-heal does xattrops unnecessarily in many cases
  • +
  • #2649 glustershd failed in bind with error "Address already in use"
  • +
  • #2652 Removal of deadcode: Pump
  • +
  • #2659 tests/basic/afr/afr-anon-inode.t crashed
  • +
  • #2664 Test suite produce uncompressed logs
  • +
  • #2693 dht: dht_local_wipe is crashed while running rename operation
  • +
  • #2771 Smallfile improvement in glusterfs
  • +
  • #2782 Glustereventsd does not listen on IPv4 when IPv6 is not available
  • +
  • #2789 An improper locking bug(e.g., deadlock) on the lock up_inode_ctx->c
  • +
  • #2798 FUSE mount option for localtime-logging is not exposed
  • +
  • #2816 Glusterfsd memory leak when subdir_mounting a volume
  • +
  • #2835 dht: found anomalies in dht_layout after commit c4cbdbcb3d02fb56a62
  • +
  • #2857 variable twice initialization.
  • +
\ No newline at end of file
diff --git a/release-notes/10.1/index.html b/release-notes/10.1/index.html
new file mode 100644
index 00000000..dd8bec51
--- /dev/null
+++ b/release-notes/10.1/index.html
@@ -0,0 +1,4545 @@

Release notes for Gluster 10.1

+

Release date: 1st-Feb-2022

+

This is a bugfix and improvement release. The release notes for 10.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 10 stable release.

+

NOTE:

+
  • Next minor release tentative date: Week of 15th May, 2022 (as published in the Gluster Community Meeting, release 10 will have updates every 3 months from now on)
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.

Important fixes in this release

+
    +
  • Fix missing stripe count issue with upgrade from 9.x to 10.x
  • +
  • Fix IO failure when shrinking distributed dispersed volume with ongoing IO
  • +
  • Fix log spam introduced with glusterfs 10.0
  • +
  • Enable ltcmalloc_minimal instead of ltcmalloc
  • +
+

Builds are available at -

+

https://download.gluster.org/pub/gluster/glusterfs/10/10.1/
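After installing the updated packages from the link above, the running version and the cluster op-version can be checked as sketched below. This is only a quick sanity check; consult the Upgrade Guide before raising the op-version, and only raise it once every node runs the new release.

    # confirm the installed version on each node
    gluster --version

    # compare the active and the maximum supported cluster op-version
    gluster volume get all cluster.op-version
    gluster volume get all cluster.max-op-version

    # optionally raise the op-version to the maximum reported above
    gluster volume set all cluster.op-version <max-op-version-reported-above>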

+

Bugs addressed

+
    +
  • #2846 Avoid redundant logs in gluster
  • +
  • #2903 Fix worker disconnect due to AttributeError in geo-replication
  • +
  • #2910 Check for available ports in port_range in glusterd
  • +
  • #2939 Remove the deprecated commands from gluster man page
  • +
  • #2947 Fix IO failure when shrinking distributed dispersed volume with ongoing IO
  • +
  • #3071 Fix log spam introduced with glusterfs 10.0
  • +
  • #3000 Enable ltcmalloc_minimal instead of ltcmalloc
  • +
  • #3086 Handle excessive log in case dict is NUL
  • +
  • #3133 Fix missing stripe count issue with upgrade from 9.x to 10.x
  • +
  • #2962 Fix volume create failures without disperse count and ip addresses
  • +
\ No newline at end of file
diff --git a/release-notes/10.2/index.html b/release-notes/10.2/index.html
new file mode 100644
index 00000000..7098a05e
--- /dev/null
+++ b/release-notes/10.2/index.html
@@ -0,0 +1,4537 @@

Release notes for Gluster 10.2

+

This is a bugfix and improvement release. The release notes for 10.0 and 10.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 10 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 15th Nov, 2022
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Important fixes in this release

+
    +
  • Optimize server functionality by enhancing server_process_event_upcall code path during the handling of upcall event
  • +
  • Fix all bricks not starting issue on node reboot when brick count is high(>750)
  • +
  • Fix stale posix locks that appear after client disconnection
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/10/10.2/

+

Bugs addressed

+
    +
  • #3182 Fix stale posix locks that appear after client disconnection
  • +
  • #3187 Fix Locks xlator fd leaks
  • +
  • #3234 Fix incorrect directory check inorder to successfully locate the SSL certificate
  • +
  • #3262 Synchronize layout(ref|unref) during layout(get|set) in dht
  • +
  • #3321 Optimize server functionality by enhancing server_process_event_upcall code path during the handling of upcall event
  • +
  • #3334 Fix errors and timeouts when creating qcow2 file via libgfapi
  • +
  • #3375 Fix all bricks not starting issue on node reboot when brick count is high(>750)
  • +
  • #3417 Fix crash due to unaligned memory access
  • +
  • #3470 Fix spurious crash when "peer probing" a non existing host name
  • +
+ + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
\ No newline at end of file
diff --git a/release-notes/10.3/index.html b/release-notes/10.3/index.html
new file mode 100644
index 00000000..810e927f
--- /dev/null
+++ b/release-notes/10.3/index.html
@@ -0,0 +1,4536 @@

Release notes for Gluster 10.3

+

Release date: 27th-Sep-2022

+

This is a bugfix release. The release notes for 10.0, 10.1 and 10.2 contain a listing of all the new improvements and bugs fixed in the GlusterFS 10 stable release.

+

NOTE:
  • Next minor release tentative date: Week of 25th Mar, 2023
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.

+

Important fixes in this release

+
    +
  • Fix qemu-img crash on a distributed volume
  • +
  • Fix a possible deadlock scenario In Brick Process
  • +
  • Allow opening snapshot directory(entrypoint) via glfs_open()/glfs_h_open() and snapshot directory(entrypoint) to support functionalities of Samba
  • +
  • Implement seek fop in DHT and prevent EBADFD related failures
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/10/10.3/

+

Issues addressed in this release

+
    +
  • #1000 Fix qemu-img crash on a distributed volume
  • +
  • #3774 Fix a possible deadlock scenario In Brick Process
  • +
  • #3373 Implement seek fop in DHT and prevent EBADFD related failures
  • +
  • #3666 Prevent snapd crashes on opening snapshot directory via gfapi
  • +
  • #3765 Allow opening snapshot directory(entrypoint) via glfs_open()/glfs_h_open()
  • +
  • #3307 Fix return from glfs_open() to honour O_DIRECTORY flag for Samba
  • +
  • #3725 Fix mismatch in errorcode between fgetxattr() and glusterfs.get_real_filename
  • +
  • #3778 Handle spurious failures of spare_file_rebalance.t test case
  • +
\ No newline at end of file
diff --git a/release-notes/10.4/index.html b/release-notes/10.4/index.html
new file mode 100644
index 00000000..2076546c
--- /dev/null
+++ b/release-notes/10.4/index.html
@@ -0,0 +1,4541 @@

Release notes for Gluster 10.4

+

Release date: 27th-Apr-2023

+

This is a bugfix release. The release notes for 10.0, 10.1, 10.2 and 10.3 contain a listing of all the new improvements and bugs fixed in the GlusterFS 10 stable release.

+

NOTE:
  • Next minor release tentative date: Week of 25th Sep, 2023
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.

+

Important fixes in this release

+
    +
  • Fix fuse concurrency problems
  • +
  • Fix memory corruption in debug builds
  • +
  • Fix recovery issue with posix locks upon reconnection of a disconnected brick
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/10/10.4/

+

Issues addressed in this release

+
    +
  • #1000 configure: Force 'char' type to be signed in order to eliminate anomalies
  • +
  • #2752 posix: Fix directory gfid handle if a rename fails
  • +
  • #3345 Fix inconsistencies in big-endian architectures of hashfn
  • +
  • #3346 Fix stack overflow when processing glx_dir(p) list structures in xdr
  • +
  • #3882 Fix deadlock in gf_print_trace of sys_log
  • +
  • #3901 Fix segmentaion fault in io-stats xlator
  • +
  • #3954 Fix stack-buffer-overflow according to AddressSanitizer
  • +
  • #4020 Improve regression test suite
  • +
  • #4029 Process stuck listing snapshots from NFS
  • +
  • #4031 Fix Input/Output error when using linux-aio on big-endean architectures
  • +
  • #4042 Fix recovery issue with posix locks upon reconnection of a disconnected brick
  • +
  • #4071 Make timestamps of .snap directory stable
  • +
  • #3894 Use opendir for directories in glfs_open and glfs_h_open of api
  • +
  • #3636 Enable posix xlator to consider storage.reserve val
  • +
\ No newline at end of file
diff --git a/release-notes/11.0/index.html b/release-notes/11.0/index.html
new file mode 100644
index 00000000..55533a43
--- /dev/null
+++ b/release-notes/11.0/index.html
@@ -0,0 +1,4658 @@

Release notes for Gluster 11.0

+

Release date: 14th-Feb-2023

+

This is a major release that includes a range of features, code improvements and stability fixes as noted below.

+

A selection of the key features and changes are documented in this page. A full list of bugs that have been addressed is included further below.

+ +

Announcements

+
  1. The release that receives maintenance updates post release 11 is 10 (reference)
  2. Releases 10 and 11 will receive maintenance updates around the 15th every six months, corresponding to their previous release dates.

Highlights

+
  • Major performance improvement of ~36% with rmdir operations #3685 (see the option-discovery sketch after this list)
  • Extension of ZFS support for snapshots #2855
  • Quota implementation based on namespace #1750
  • Major cleanups and readdir/readdirp improvements link1 link2
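The exact volume option name introduced by #3685 is not spelled out here, so rather than guessing it, the sketch below shows how to discover it on an upgraded node; the grep pattern is only a guess at part of the name.

    # list all settable volume options on a Gluster 11 node and look for the rmdir optimization
    gluster volume set help | grep -i rmdir

    # once the exact option name is confirmed, it can be enabled per volume, e.g.
    # gluster volume set <volname> <option-name> on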

Bugs addressed

+

Bugs addressed since release-10 are listed below:

+
    +
  • #1831 virtual images in replicated volume are not healed
  • +
  • #1459 gluster_shared_storage failed to automount on node reboot on rhel 8
  • +
  • #1458 sharding: fanout mknod process into multi threads
  • +
  • #1457 systemd unit files missing from Debian 8.1-1 package
  • +
  • #1456 virtual images in replicated volume are not healed?
  • +
  • #1455 docs.gluster.org is down
  • +
  • #1454 Geo-replication gsyncd at 100% CPU
  • +
  • #1453 Disperse shd heal activity should be observable
  • +
  • #1452 all glusterfs-client crashed at the same time
  • +
  • #1451 Add details on ssl_setup_connection_params to help troubleshooting
  • +
  • #1450 Please consider repackaging/providing dependency package (stubs)
  • +
  • #145 Snapshot: improvements
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #1002 [bug:1679998] GlusterFS can be improved
  • +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
  • #1686 mount-shared-storage.sh issue with systemd-automount
  • +
  • #1757 RFE: improve namespace support in glusterfs
  • +
  • #1774 RFE: simple-quota implementation
  • +
  • #2123 Implement an I/O framework
  • +
  • #2164 gf_proc_dump_call_stack() is not printing ctime correctly
  • +
  • #2308 Developer sessions for glusterfs
  • +
  • #2469 Python syntax error in syncdutils.py
  • +
  • #2483 Place holder issue for fixing potential bugs in protocol/client/src
  • +
  • #2491 Add s390x support to community CI
  • +
  • #2664 Test suite produce uncompressed logs
  • +
  • #2717 GlusterFS doesn't support O_PATH flag in open()
  • +
  • #2735 Remove byte-order.h and use the normal byteorder functions
  • +
  • #2793 cluster.rebal-throttle description doesn't seem to match the code
  • +
  • #2832 selinux: make it possible to persist ganesha_use_fusefs from one up
  • +
  • #2846 glusterd log filled with error messages.
  • +
  • #2903 geo-rep restarts because of 'list' object has no attribute 'join' e
  • +
  • #2910 glusterd: volume start doesn't fail with properly if the port range
  • +
  • #2912 When glusterfs uses an untrusted domain name, it cannot update the
  • +
  • #2913 gluster lib cannot be dlopened: /lib64/libtcmalloc.so.4: cannot all
  • +
  • #2916 Replace SHA deprecated functions with newer ones
  • +
  • #2936 Wrong value for inodeSize in Volume status xml output.
  • +
  • #2939 Volume log commands 'filename' and 'locate' described in the man bu
  • +
  • #2944 tests: valid ip to be used instead of localhost, 127.0.0.1 or loopb
  • +
  • #2947 IO failure when shrinking distributed dispersed volume while perfor
  • +
  • #2962 cli: volume create without disperse count fails with ip addresses
  • +
  • #2963 Do not use an iobuf pool for the CLI
  • +
  • #2964 Cleanup the stub pool
  • +
  • #2967 Make relevant functions static
  • +
  • #2971 core file from /tests/basic/fencing/afr-lock-heal-advanced.t
  • +
  • #2973 Allocate socket ioq outside of the out_lock
  • +
  • #2975 quick-read: remove unused 'sh-failed' dict_get() function
  • +
  • #2986 AFR: reduce variable indirection
  • +
  • #2989 GF_ASSERT_AND_GOTO_WITH_ERROR improvements
  • +
  • #2997 HA status is in FAILOVER when configuring NFS ganesha with pacemake
  • +
  • #2998 Remove old authentication schemes
  • +
  • #3004 Use gf_strndup() instead of gf_strdup() when string length is known
  • +
  • #3005 multiple files: improve gf_dirent_for_name() functionality
  • +
  • #3012 Reduce the impact of Cloudsync on IO path
  • +
  • #3054 Update the links in gluster test framework documentation
  • +
  • #3066 Online upgrade - 9.x to 10.0
  • +
  • #3071 Log spam with glusterfs 10.0
  • +
  • #3076 __lease_ctx_set() is not checking correctly for the return code
  • +
  • #3103 glusterfs snapd crashes when snapshot is de-activated.
  • +
  • #3130 Reduce the number of include statements
  • +
  • #3137 Cleanup common include files
  • +
  • #3166 bug: Incorrect Mac OS version assertion in configure.
  • +
  • #3182 Some stale posix locks appear after a client disconnection
  • +
  • #3187 Locks xlator leaks fd's in some cases
  • +
  • #3191 dht: Fix double free issue in the cbk function dht_common_mark_mdsx
  • +
  • #3194 Log message for RPC clients is misleading because it logs unsigned
  • +
  • #3213 Configure geo rep SSH with AES128 encryption - to improve performance
  • +
  • #3217 Disperse volume with more than 16 data bricks fails to mount
  • +
  • #3228 event-epoll.c: table_idx will always be 0?
  • +
  • #3229 handle 'option remote-port' properly in client protocol.
  • +
  • #3232 The config for creating /var/run/gluster on boot is missing for glu
  • +
  • #3234 SSL certificate wrong default path
  • +
  • #3236 nfs: Optimize ctxcount value to reduce memory consumption for nfs-s
  • +
  • #3240 dht_revalidate_cbk() needs to trigger directory heal with root perm
  • +
  • #3248 fault in gluster command completion
  • +
  • #3262 dht: Synchronize layout_(ref|unref) during layout_(get|set) in dht
  • +
  • #3264 Posix private struct - reduce size
  • +
  • #3288 GFID split brain resolution using favourite-child-policy leads to E
  • +
  • #3294 Remove dht_nonblocking_inodelk() - it's not used
  • +
  • #3304 CID 1476381: (OVERRUN) @ /rpc/rpc-lib/src/rpc-clnt.c: 249 in __save
  • +
  • #3321 server: Optimize server_process_event_upcall code path during handl
  • +
  • #3329 mounting with ipv6 hostname leads to failure
  • +
  • #3334 Error messages and 20 seconds timeout when creating qcow2 file via
  • +
  • #3347 Test Failure: tests/bitrot/bug-1207627-bitrot-scrub-status.t
  • +
  • #3359 meta_lookup() and other small improvements to meta xlator
  • +
  • #3373 DHT doesn't implement seek fop and causes failures
  • +
  • #3375 glusterd: After node reboot not able to start all bricks successful
  • +
  • #3382 Dictionary: remove all hash related code
  • +
  • #3394 autoupdate - Update a configure.ac to a newer Autoconf
  • +
  • #3417 Crash due to unaligned memory access
  • +
  • #3426 logging.c small improvements
  • +
  • #3469 Improve regression testing
  • +
  • #3470 Spurious crash when "peer probing" a non existing host name
  • +
  • #3507 thin-arbiter-volume.t execute failed
  • +
  • #3521 changelog: A brick process is getting crash due to SIGSEGV
  • +
  • #3527 Brick process crashed when global thread pool is enabled
  • +
  • #3604 is_nfs_export_available check and mount_nfs commands fail in RHEL 8
  • +
  • #3636 posix: small storage environment and storage.reserve value
  • +
  • #3647 wrong debug log in upcall_cache_invalidate()
  • +
  • #3662 Some times Gluster Volume info XML returns wrong distCount
  • +
  • #3683 dht: Cleanup linkto file by rebalance daemon while (hashed|cached)
  • +
  • #3685 dht: Introduce rmdir-optimize option
  • +
  • #3688 Infinite loop in dht when lookup fails with ENODATA
  • +
  • #3695 test: ./tests/bugs/posix/bug-1651445.t is continuous failing during
  • +
  • #3708 ./tests/basic/afr/afr-no-fsync.t is failing on FSYNCDIR, not on FSY
  • +
  • #3710 Brick crashes automatically when writing small files
  • +
  • #3717 syncop functions: no need to copy iatt structs if you are not going
  • +
  • #3729 gf_svc_releasedir() logic error
  • +
  • #3750 Bash completion is loaded every time bash starts
  • +
  • #3774 Dead Lock In Brick Process
  • +
  • #3778 test: Increase timeout for ./tests/basic/distribute/spare_file_reba
  • +
  • #3781 Prime time client-only access control
  • +
  • #3793 tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum
  • +
  • #3797 cdc xlator improvements
  • +
  • #3823 rfc.sh: cannot detect upstream remote for non english locale
  • +
  • #3831 afr: posix lock behavior is not correct while received an interrupt
  • +
  • #3836 tests/bugs/glusterd/daemon-log-level-option.t fails on regression
  • +
  • #3845 fuse_readdirp_cbk() - minor modifications
  • +
  • #3847 gftest build failure
  • +
  • #3855 reduce work for memory account
  • +
  • #3876 fsetxattr() failed with EBADFD on opened directories
  • +
  • #3891 Possible trash can size overflow
  • +
\ No newline at end of file
diff --git a/release-notes/11.1/index.html b/release-notes/11.1/index.html
new file mode 100644
index 00000000..acc9636c
--- /dev/null
+++ b/release-notes/11.1/index.html
@@ -0,0 +1,4546 @@

Release notes for Gluster 11.1

+

This is a bugfix release. The release notes for 11.0 contain a listing of all the improvements added and bugs fixed in the GlusterFS 11 stable release.

+

NOTE:
- Next minor release tentative date: releases will be made based on requirement only.
- Users are highly encouraged to upgrade to newer releases of GlusterFS.

+

Important fixes in this release

+
    +
  • Fix upgrade issue by reverting posix change related to storage.reserve value
  • +
  • Fix possible data loss during rebalance if there is any linkfile on the system
  • +
  • Fix maximum op-version for release 11
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/11/11.1/

+

Issues addressed in this release

+
    +
  • #1000 Force 'char' type to be signed and add '-fsigned-char' option during compilation
  • +
  • #2752 Fix directory gfid handle if a rename fails
  • +
  • #3346 Fix stack overflow when processing glx_dir(p)list structures in xdr
  • +
  • #3636 Fix upgrade issue by reverting posix change related to storage.reserve value
  • +
  • #3701 Fix error "not supported for ipv6"
  • +
  • #3732 Fix an AddressSanitizer issue heap-use-after-free
  • +
  • #4005 Fix maximum op-version for release 11
  • +
  • #4020 Improve regression test suite
  • +
  • #4029 Process stuck listing snapshots from NFS
  • +
  • #4031 Fix write failures with "I/O error" when using linux-aio on big-endian architectures
  • +
  • #4042 Recover posix locks upon reconnection of a disconnected brick
  • +
  • #4071 Make timestamps stable in snapview-server
  • +
  • #4074 Fix fuse crashing with "Assertion failed: inode_lookup >= nlookup"
  • +
  • #4107 Fix the issue of cli not showing the correct volume type
  • +
  • #4148 Fix possible data loss during rebalance if there is any linkfile on the system
  • +
  • #4190 Fix the issue of glusterfs encountering a SIGSEGV in __gf_free
  • +
  • #4198 Fix warnings raised by glusterfs fuse script on fedora38
  • +
  • #4224 Add simple-quota xattr to afr and ec ignore list
  • +
  • #4196 Revert structure of per_thread_pool_list_t
  • +
  • #4255 Fix the brick process crash during the upcall event
  • +
\ No newline at end of file
diff --git a/release-notes/3.10.0/index.html b/release-notes/3.10.0/index.html
new file mode 100644
index 00000000..08689b3e
--- /dev/null
+++ b/release-notes/3.10.0/index.html
@@ -0,0 +1,5167 @@

Release notes for Gluster 3.10.0

+

This is a major Gluster release that includes some substantial changes. The features revolve around better support in container environments, scaling to a larger number of bricks per node, and a few usability and performance improvements, among other bug fixes.

+

The most notable features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

+

Major changes and features

+

Brick multiplexing

+

Notes for users: Multiplexing reduces both port and memory usage. It does not improve performance vs. non-multiplexing except when memory is the limiting factor, though there are other related changes that improve performance overall (e.g. compared to 3.9).

+

Multiplexing is off by default. It can be enabled with

+
# gluster volume set all cluster.brick-multiplex on
+
+

Limitations: There are currently no tuning options for multiplexing - it's all or nothing. This will change in the near future.

+

Known Issues: The only feature or combination of features known not to work with multiplexing is USS and SSL. Anyone using that combination should leave multiplexing off.
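
If multiplexing has been enabled and later needs to be switched off again (for example because USS and SSL are in use together), the commands below show one way to do it. This is only a sketch: it assumes the installed release supports querying global options with volume get, and brick processes only pick up the change once the volumes are restarted.

# gluster volume get all cluster.brick-multiplex
# gluster volume set all cluster.brick-multiplex off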

+

Support to display op-version information from clients

+

Notes for users: To get information on which op-versions are supported by the clients, users can invoke the gluster volume status command for clients. Along with information on hostname, port, bytes read, bytes written and number of clients connected per brick, we now also get the op-version on which the respective clients operate. Following is the example usage:

+
# gluster volume status <VOLNAME|all> clients
+
+

Limitations:

+

Known Issues:

+

Support to get maximum op-version in a heterogeneous cluster

+

Notes for users: A heterogeneous cluster operates on a common op-version that can be supported across all the nodes in the trusted storage pool. Upon upgrade of the nodes in the cluster, the cluster might support a higher op-version. Users can retrieve the maximum op-version to which the cluster could be bumped up by invoking the gluster volume get command on the newly introduced global option, cluster.max-op-version. The usage is as follows:

+
# gluster volume get all cluster.max-op-version
+
+

Limitations:

+

Known Issues:

+

Support for rebalance time to completion estimation

+

Notes for users: Users can now see approximately how much time the rebalance operation will take to complete across all nodes.

+

The estimated time left for rebalance to complete is displayed as part of the rebalance status. Use the command:

+
# gluster volume rebalance <VOLNAME> status
+
+

Limitations: The rebalance process calculates the time left based on the rate at which files are processed on the node and the total number of files on the brick, which is determined using statfs. The limitations of this are:

+
    +
  • +

    A single fs partition must host only one brick. Multiple bricks on the same fs partition will cause the statfs results to be invalid.

    +
  • +
  • +

    The estimates are dynamic and are recalculated every time the rebalance status command is invoked. The estimates become more accurate over time, so short-running rebalance operations may not benefit.

    +
  • +
+

Known Issues: As glusterfs does not store the number of files on the brick, we use statfs to guess the number. The .glusterfs directory contents can significantly skew this number and affect the calculated estimates.

+

Separation of tier as its own service

+

Notes for users: This change moves the management of the tier daemon into the gluster service framework, thereby improving its stability and manageability.

+

This does not change any of the tier commands or user-facing interfaces and operations.

+

Limitations:

+

Known Issues:

+

Statedump support for gfapi based applications

+

Notes for users: gfapi-based applications can now dump state information for better troubleshooting of issues. A statedump can be triggered in two ways:

+
    +
  1. by executing the following on one of the Gluster servers,
  2. +
+
   # gluster volume statedump <VOLNAME> client <HOST>:<PID>
+
+
    +
  • <VOLNAME> should be replaced by the name of the volume
  • +
  • <HOST> should be replaced by the hostname of the system running the gfapi application
  • +
  • +

    <PID> should be replaced by the PID of the gfapi application

    +
  • +
  • +

    through calling glfs_sysrq(<FS>, GLFS_SYSRQ_STATEDUMP) within the application

    +
  • +
  • +

    <FS> should be replaced by a pointer to a glfs_t structure

    +
  • +
+

All statedumps (*.dump.* files) will be located at the usual location; on most distributions this would be /var/run/gluster/.
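
For example, on a distribution that uses this default path, the generated statedump files can be listed as shown below (adjust the path if your packages use a different run directory):

# ls /var/run/gluster/*.dump.*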

+

Limitations: It is not possible to trigger statedumps from the Gluster CLI when the gfapi application has lost its management connection to the GlusterD servers.

+

GlusterFS 3.10 is the first release that contains support for the new glfs_sysrq() function. Applications that include features for debugging will need to be adapted to call this function. At the time of the release of 3.10, no applications are known to call glfs_sysrq().

+

Known Issues:

+

Disabled creation of trash directory by default

+

Notes for users: From now on, the trash directory, namely .trashcan, will not be created by default upon creation of new volumes unless the feature is turned ON, and the associated restrictions apply as long as features.trash is set for a particular volume.

+

Limitations: After an upgrade, the trash directory will still be present at the root of pre-existing volumes. Those who are not interested in this feature may have to manually delete the directory from the mount point.
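
A minimal sketch of that cleanup is shown below. It assumes the volume is mounted via the FUSE client, that server1 and the mount path are placeholders for your own values, and that features.trash is not enabled on the volume:

# mount -t glusterfs server1:/<VOLNAME> /mnt/<VOLNAME>
# rm -rf /mnt/<VOLNAME>/.trashcan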

+

Known Issues:

+

Implemented parallel readdirp with distribute xlator

+

Notes for users: Currently the directory listing gets slower as the number of bricks/nodes in a volume increases, even though the file/directory numbers remain unchanged. With this feature, the performance of directory listing is made mostly independent of the number of nodes/bricks in the volume, so scale doesn't exponentially reduce the directory listing performance. (On a 2, 5, 10, 25 brick setup we saw ~5, 100, 400, 450% improvement respectively.)

+

To enable this feature:

+
# gluster volume set <VOLNAME> performance.readdir-ahead on
# gluster volume set <VOLNAME> performance.parallel-readdir on
+
+

To disable this feature:

+
# gluster volume set <VOLNAME> performance.parallel-readdir off
+
+

If there are more than 50 bricks in the volume, it is good to increase the cache size to more than 10MB (the default value):

+
# gluster volume set <VOLNAME> performance.rda-cache-limit <CACHE SIZE>
+
+

Limitations:

+

Known Issues:

+

md-cache can optionally -ve cache security.ima xattr

+

Notes for users: From kernel version 3.X or greater, creating a file results in a removexattr call on the security.ima xattr. This xattr is not set on the file unless the IMA feature is active. With this patch, the removexattr call returns ENODATA if it is not found in the cache.

+

The end benefit is faster create operations where IMA is not enabled.

+

To cache this xattr, use:

+
# gluster volume set <VOLNAME> performance.cache-ima-xattrs on
+
+

The above option is on by default.

+

Limitations:

+

Known Issues:

+

Added support for CPU extensions in disperse computations

+

Notes for users: To improve disperse computations, a new way of generating dynamic code targeting specific CPU extensions like SSE and AVX on Intel processors is implemented. The available extensions are detected at run time. This can roughly double encoding and decoding speeds (or halve CPU usage).

+

This change is 100% compatible with the old method. No change is needed if an existing volume is upgraded.

+

You can control which extensions to use, or disable them, with the following command:

+
# gluster volume set <VOLNAME> disperse.cpu-extensions <type>
+
+

Valid values are:

+
    +
  • none: Completely disable dynamic code generation
  • +
  • auto: Automatically detect available extensions and use the best one
  • +
  • x64: Use dynamic code generation using standard 64 bits instructions
  • +
  • sse: Use dynamic code generation using SSE extensions (128 bits)
  • +
  • avx: Use dynamic code generation using AVX extensions (256 bits)
  • +
+

The default value is 'auto'. If a value is specified that is not detected at run time, it will automatically fall back to the next available option.
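
As an illustration only (the volume name and the chosen extension are placeholders, not recommendations), the configured value can be inspected and a specific extension pinned as follows:

# gluster volume get <VOLNAME> disperse.cpu-extensions
# gluster volume set <VOLNAME> disperse.cpu-extensions sse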

+

Limitations:

+

Known Issues: To solve a conflict between the dynamic code generator and SELinux, it has been necessary to create a dynamic file at runtime in the directory /usr/libexec/glusterfs. This directory only exists if the server package is installed. On nodes with only the client package installed, this directory won't exist and the dynamic code won't be used.

+

It also needs root privileges to create the file there, so any gfapi application not running as root won't be able to use dynamic code generation.

+

In these cases, disperse volumes will continue working normally but using the old implementation (equivalent to setting disperse.cpu-extensions to none).

+

More information and a discussion on how to solve this can be found here:

+

https://bugzilla.redhat.com/1421649

+

Bugs addressed

+

Bugs addressed since release-3.9 are listed below.

+
    +
  • #789278: Issues reported by Coverity static analysis tool
  • +
  • #1198849: Minor improvements and cleanup for the build system
  • +
  • #1211863: RFE: Support in md-cache to use upcall notifications to invalidate its cache
  • +
  • #1231224: Misleading error messages on brick logs while creating directory (mkdir) on fuse mount
  • +
  • #1234054: `gluster volume heal split-brain' does not heal if data/metadata/entry self-heal options are turned off
  • +
  • #1289922: Implement SIMD support on EC
  • +
  • #1290304: [RFE]Reducing number of network round trips
  • +
  • #1297182: Mounting with "-o noatime" or "-o noexec" causes "nosuid,nodev" to be set as well
  • +
  • #1313838: Tiering as separate process and in v status moving tier task to tier process
  • +
  • #1316873: EC: Set/unset dirty flag for all the update operations
  • +
  • #1325531: Statedump: Add per xlator ref counting for inode
  • +
  • #1325792: "gluster vol heal test statistics heal-count replica" seems doesn't work
  • +
  • #1330604: out-of-tree builds generate XDR headers and source files in the original directory
  • +
  • #1336371: Sequential volume start&stop is failing with SSL enabled setup.
  • +
  • #1341948: DHT: Rebalance- Misleading log messages from __dht_check_free_space function
  • +
  • #1344714: removal of file from nfs mount crashes ganesha server
  • +
  • #1349385: [FEAT]jbr: Add rollbacking of failed fops
  • +
  • #1355956: RFE : move ganesha related configuration into shared storage
  • +
  • #1356076: DHT doesn't evenly balance files on FreeBSD with ZFS
  • +
  • #1356960: OOM Kill on client when heal is in progress on 1*(2+1) arbiter volume
  • +
  • #1357753: JSON output for all Events CLI commands
  • +
  • #1357754: Delayed Events if any one Webhook is slow
  • +
  • #1358296: tier: breaking down the monolith processing function tier_migrate_using_query_file()
  • +
  • #1359612: [RFE] Geo-replication Logging Improvements
  • +
  • #1360670: Add output option --xml to man page of gluster
  • +
  • #1363595: Node remains in stopped state in pcs status with "/usr/lib/ocf/resource.d/heartbeat/ganesha_mon: line 137: [: too many arguments ]" messages in logs.
  • +
  • #1363965: geo-replication *changes.log does not respect the log-level configured
  • +
  • #1364420: [RFE] History Crawl performance improvement
  • +
  • #1365395: Support for rc.d and init for Service management
  • +
  • #1365740: dht: Update stbuf from servers having layout
  • +
  • #1365791: Geo-rep worker Faulty with OSError: [Errno 21] Is a directory
  • +
  • #1365822: [RFE] cli command to get max supported cluster.op-version
  • +
  • #1366494: Rebalance is not considering the brick sizes while fixing the layout
  • +
  • #1366495: 1 mkdir generates tons of log messages from dht xlator
  • +
  • #1366648: [GSS] A hot tier brick becomes full, causing the entire volume to have issues and returns stale file handle and input/output error.
  • +
  • #1366815: spurious heal info as pending heal entries never end on an EC volume while IOs are going on
  • +
  • #1368012: gluster fails to propagate permissions on the root of a gluster export when adding bricks
  • +
  • #1368138: Crash of glusterd when using long username with geo-replication
  • +
  • #1368312: Value of `replica.split-brain-status' attribute of a directory in metadata split-brain in a dist-rep volume reads that it is not in split-brain
  • +
  • #1368336: [RFE] Tier Events
  • +
  • #1369077: The directories get renamed when data bricks are offline in 4*(2+1) volume
  • +
  • #1369124: fix unused variable warnings from out-of-tree builds generate XDR headers and source files i...
  • +
  • #1369397: segment fault in changelog_cleanup_dispatchers
  • +
  • #1369403: [RFE]: events from protocol server
  • +
  • #1369523: worm: variable reten_mode is invalid to be free by mem_put in fini()
  • +
  • #1370410: [granular entry sh] - Provide a CLI to enable/disable the feature that checks that there are no heals pending before allowing the operation
  • +
  • #1370567: [RFE] Provide snapshot events for the new eventing framework
  • +
  • #1370931: glfs_realpath() should not return malloc()'d allocated memory
  • +
  • #1371353: posix: Integrate important events with events framework
  • +
  • #1371470: disperse: Integrate important events with events framework
  • +
  • #1371485: [RFE]: AFR events
  • +
  • #1371539: Quota version not changing in the quota.conf after upgrading to 3.7.1 from 3.6.1
  • +
  • #1371540: Spurious regression in tests/basic/gfapi/bug1291259.t
  • +
  • #1371874: [RFE] DHT Events
  • +
  • #1372193: [geo-rep]: AttributeError: 'Popen' object has no attribute 'elines'
  • +
  • #1372211: write-behind: flush stuck by former failed write
  • +
  • #1372356: glusterd experiencing repeated connect/disconnect messages when shd is down
  • +
  • #1372553: "gluster vol status all clients --xml" doesn't generate xml if there is a failure in between
  • +
  • #1372584: Fix the test case http://review.gluster.org/#/c/15385/
  • +
  • #1373072: Event pushed even if Answer is No in the Volume Stop and Delete prompt
  • +
  • #1373373: Worker crashes with EINVAL errors
  • +
  • #1373520: [Bitrot]: Recovery fails of a corrupted hardlink (and the corresponding parent file) in a disperse volume
  • +
  • #1373741: [geo-replication]: geo-rep Status is not showing bricks from one of the nodes
  • +
  • #1374093: glusterfs: create a directory with 0464 mode return EIO error
  • +
  • #1374286: [geo-rep]: defunct tar process while using tar+ssh sync
  • +
  • #1374584: Detach tier commit is allowed when detach tier start goes into failed state
  • +
  • #1374587: gf_event python fails with ImportError
  • +
  • #1374993: bug-963541.t spurious failure
  • +
  • #1375181: /var/tmp/rpm-tmp.KPCugR: line 2: /bin/systemctl: No such file or directory
  • +
  • #1375431: [RFE] enable sharding and strict-o-direct with virt profile - /var/lib/glusterd/groups/virt
  • +
  • #1375526: Kill rpc.statd on Linux machines
  • +
  • #1375532: Rpm installation fails with conflicts error for eventsconfig.json file
  • +
  • #1376671: Rebalance fails to start if a brick is down
  • +
  • #1376693: RFE: Provide a prompt when enabling gluster-NFS
  • +
  • #1377097: The GlusterFS Callback RPC-calls always use RPC/XID 42
  • +
  • #1377341: out-of-tree builds generate XDR headers and source files in the original directory
  • +
  • #1377427: incorrect fuse dumping for WRITE
  • +
  • #1377556: Files not being opened with o_direct flag during random read operation (Glusterfs 3.8.2)
  • +
  • #1377584: memory leak problems are found in daemon:glusterd, server:glusterfsd and client:glusterfs
  • +
  • #1377607: Volume restart couldn't re-export the volume exported via ganesha.
  • +
  • #1377864: Creation of files on hot tier volume taking very long time
  • +
  • #1378057: glusterd fails to start without installing glusterfs-events package
  • +
  • #1378072: Modifications to AFR Events
  • +
  • #1378305: DHT: remove unused structure members
  • +
  • #1378436: build: python-ctypes no longer exists in Fedora Rawhide
  • +
  • #1378492: warning messages seen in glusterd logs for each 'gluster volume status' command
  • +
  • #1378684: Poor smallfile read performance on Arbiter volume compared to Replica 3 volume
  • +
  • #1378778: Add a test script for compound fops changes in AFR
  • +
  • #1378842: [RFE] 'gluster volume get' should implement the way to retrieve volume options using the volume name 'all'
  • +
  • #1379223: "nfs.disable: on" is not showing in Vol info by default for the 3.7.x volumes after updating to 3.9.0
  • +
  • #1379285: gfapi: Fix fd ref leaks
  • +
  • #1379328: Boolean attributes are published as string
  • +
  • #1379330: eventsapi/georep: Events are not available for Checkpoint and Status Change
  • +
  • #1379511: Fix spurious failures in open-behind.t
  • +
  • #1379655: Recording (ffmpeg) processes on FUSE get hung
  • +
  • #1379720: errors appear in brick and nfs logs and getting stale files on NFS clients
  • +
  • #1379769: GlusterFS fails to build on old Linux distros with linux/oom.h missing
  • +
  • #1380249: Huge memory usage of FUSE client
  • +
  • #1380275: client ID should logged when SSL connection fails
  • +
  • #1381115: Polling failure errors getting when volume is started&stopped with SSL enabled setup.
  • +
  • #1381421: afr fix shd log message error
  • +
  • #1381830: Regression caused by enabling client-io-threads by default
  • +
  • #1382236: glusterfind pre session hangs indefinitely
  • +
  • #1382258: RFE: Support to update NFS-Ganesha export options dynamically
  • +
  • #1382266: md-cache: Invalidate cache entry in case of OPEN with O_TRUNC
  • +
  • #1384142: crypt: changes needed for openssl-1.1 (coming in Fedora 26)
  • +
  • #1384297: glusterfs can't self heal character dev file for invalid dev_t parameters
  • +
  • #1384906: arbiter volume write performance is bad with sharding
  • +
  • #1385104: invalid argument warning messages seen in fuse client logs 2016-09-30 06:34:58.938667] W [dict.c:418ict_set] (-->/usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x58722) 0-dict: !this || !value for key=link-count [Invalid argument]
  • +
  • #1385575: pmap_signin event fails to update brickinfo->signed_in flag
  • +
  • #1385593: Fix some spelling mistakes in comments and log messages
  • +
  • #1385839: Incorrect volume type in the "glusterd_state" file generated using CLI "gluster get-state"
  • +
  • #1386088: Memory Leaks in snapshot code path
  • +
  • #1386097: 4 of 8 bricks (2 dht subvols) crashed on systemic setup
  • +
  • #1386123: geo-replica slave node goes faulty for non-root user session due to fail to locate gluster binary
  • +
  • #1386141: Error and warning message getting while removing glusterfs-events package
  • +
  • #1386188: Asynchronous Unsplit-brain still causes Input/Output Error on system calls
  • +
  • #1386200: Log all published events
  • +
  • #1386247: [Eventing]: 'gluster volume tier start force' does not generate a TIER_START event
  • +
  • #1386450: Continuous warning messages getting when one of the cluster node is down on SSL setup.
  • +
  • #1386516: [Eventing]: UUID is showing zeros in the event message for the peer probe operation.
  • +
  • #1386626: fuse mount point not accessible
  • +
  • #1386766: trashcan max file limit cannot go beyond 1GB
  • +
  • #1387160: clone creation with older names in a system fails
  • +
  • #1387207: [Eventing]: Random VOLUME_SET events seen when no operation is done on the gluster cluster
  • +
  • #1387241: Pass proper permission to acl_permit() in posix_acl_open()
  • +
  • #1387652: [Eventing]: BRICK_DISCONNECTED events seen when a tier volume is stopped
  • +
  • #1387864: [Eventing]: 'gluster vol bitrot scrub ondemand' does not produce an event
  • +
  • #1388010: [Eventing]: 'VOLUME_REBALANCE' event messages have an incorrect volume name
  • +
  • #1388062: throw warning to show that older tier commands are deprecated and will be removed.
  • +
  • #1388292: performance.read-ahead on results in processes on client stuck in IO wait
  • +
  • #1388348: glusterd: Display proper error message and fail the command if S32gluster_enable_shared_storage.sh hook script is not present during gluster volume set all cluster.enable-shared-storage command
  • +
  • #1388401: Labelled geo-rep checkpoints hide geo-replication status
  • +
  • #1388861: build: python on Debian-based dists use .../lib/python2.7/dist-packages instead of .../site-packages
  • +
  • #1388862: [Eventing]: Events not seen when command is triggered from one of the peer nodes
  • +
  • #1388877: Continuous errors getting in the mount log when the volume mount server glusterd is down.
  • +
  • #1389293: build: incorrect Requires: for portblock resource agent
  • +
  • #1389481: glusterfind fails to list files from tiered volume
  • +
  • #1389697: Remove-brick status output is showing status of fix-layout instead of original remove-brick status output
  • +
  • #1389746: Refresh config fails while exporting subdirectories within a volume
  • +
  • #1390050: Elasticsearch get CorruptIndexException errors when running with GlusterFS persistent storage
  • +
  • #1391086: gfapi clients crash while using async calls due to double fd_unref
  • +
  • #1391387: The FUSE client log is filling up with posix_acl_default and posix_acl_access messages
  • +
  • #1392167: SMB[md-cache Private Build]:Error messages in brick logs related to upcall_cache_invalidate gf_uuid_is_null
  • +
  • #1392445: Hosted Engine VM paused post replace-brick operation
  • +
  • #1392713: inconsistent file permissions b/w write permission and sticky bits(---------T ) displayed when IOs are going on with md-cache enabled (and within the invalidation cycle)
  • +
  • #1392772: [setxattr_cbk] "Permission denied" warning messages are seen in logs while running pjd-fstest suite
  • +
  • #1392865: Better logging when reporting failures of the kind " Failing MKNOD as quorum is not met"
  • +
  • #1393259: stat of file is hung with possible deadlock
  • +
  • #1393678: Worker restarts on log-rsync-performance config update
  • +
  • #1394131: [md-cache]: All bricks crashed while performing symlink and rename from client at the same time
  • +
  • #1394224: "nfs-grace-monitor" timed out messages observed
  • +
  • #1394548: Make debugging EACCES errors easier to debug
  • +
  • #1394719: libgfapi core dumps
  • +
  • #1394881: Failed to enable nfs-ganesha after disabling nfs-ganesha cluster
  • +
  • #1395261: Seeing error messages [snapview-client.c:283:gf_svc_lookup_cbk] and [dht-helper.c:1666ht_inode_ctx_time_update] (-->/usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x5d75c)
  • +
  • #1395648: ganesha-ha.conf --status should validate if the VIPs are assigned to right nodes
  • +
  • #1395660: Checkpoint completed event missing master node detail
  • +
  • #1395687: Client side IObuff leaks at a high pace consumes complete client memory and hence making gluster volume inaccessible
  • +
  • #1395993: heal info --xml when bricks are down in a systemic environment is not displaying anything even after more than 30minutes
  • +
  • #1396038: refresh-config fails and crashes ganesha when mdcache is enabled on the volume.
  • +
  • #1396048: A hard link is lost during rebalance+lookup
  • +
  • #1396062: [geo-rep]: Worker crashes seen while renaming directories in loop
  • +
  • #1396081: Wrong value in Last Synced column during Hybrid Crawl
  • +
  • #1396364: Scheduler : Scheduler should not depend on glusterfs-events package
  • +
  • #1396793: [Ganesha] : Ganesha crashes intermittently during nfs-ganesha restarts.
  • +
  • #1396807: capture volume tunables in get-state dump
  • +
  • #1396952: I/O errors on FUSE mount point when reading and writing from 2 clients
  • +
  • #1397052: OOM kill of nfs-ganesha on one node while fs-sanity test suite is executed.
  • +
  • #1397177: memory leak when using libgfapi
  • +
  • #1397419: glusterfs_ctx_defaults_init is re-initializing ctx->locks
  • +
  • #1397424: PEER_REJECT, EVENT_BRICKPATH_RESOLVE_FAILED, EVENT_COMPARE_FRIEND_VOLUME_FAILED are not seen
  • +
  • #1397754: [SAMBA-CIFS] : IO hungs in cifs mount while graph switch on & off
  • +
  • #1397795: NFS-Ganesha:Volume reset for any option causes reset of ganesha enable option and bring down the ganesha services
  • +
  • #1398076: SEEK_HOLE/ SEEK_DATA doesn't return the correct offset
  • +
  • #1398226: With compound fops on, client process crashes when a replica is brought down while IO is in progress
  • +
  • #1398566: self-heal info command hangs after triggering self-heal
  • +
  • #1399031: build: add systemd dependency to glusterfs sub-package
  • +
  • #1399072: [Disperse] healing should not start if only data bricks are UP
  • +
  • #1399134: GlusterFS client crashes during remove-brick operation
  • +
  • #1399154: After ganesha node reboot/shutdown, portblock process goes to FAILED state
  • +
  • #1399186: [GANESHA] Export ID changed during volume start and stop with message "lookup_export failed with Export id not found" in ganesha.log
  • +
  • #1399578: [compound FOPs]: Memory leak while doing FOPs with brick down
  • +
  • #1399592: Memory leak when self healing daemon queue is full
  • +
  • #1399780: Use standard refcounting for structures where possible
  • +
  • #1399995: Dump volume specific options in get-state output in a more parseable manner
  • +
  • #1400013: [USS,SSL] .snaps directory is not reachable when I/O encryption (SSL) is enabled
  • +
  • #1400026: Duplicate value assigned to GD_MSG_DAEMON_STATE_REQ_RCVD and GD_MSG_BRICK_CLEANUP_SUCCESS messages
  • +
  • #1400237: Ganesha services are not stopped when pacemaker quorum is lost
  • +
  • #1400613: [GANESHA] failed to create directory of hostname of new node in var/lib/nfs/ganesha/ in already existing cluster nodes
  • +
  • #1400818: possible memory leak on client when writing to a file while another client issues a truncate
  • +
  • #1401095: log the error when locking the brick directory fails
  • +
  • #1401218: Fix compound fops memory leaks
  • +
  • #1401404: [Arbiter] IO's Halted and heal info command hung
  • +
  • #1401777: atime becomes zero when truncating file via ganesha (or gluster-NFS)
  • +
  • #1401801: [RFE] Use Host UUID to find local nodes to spawn workers
  • +
  • #1401812: RFE: Make readdirp parallel in dht
  • +
  • #1401822: [GANESHA]Unable to export the ganesha volume after doing volume start and stop
  • +
  • #1401836: update documentation to readthedocs.io
  • +
  • #1401921: glusterfsd crashed while taking snapshot using scheduler
  • +
  • #1402237: Bad spacing in error message in cli
  • +
  • #1402261: cli: compile warnings (unused var) if building without bd xlator
  • +
  • #1402369: Getting the warning message while erasing the gluster "glusterfs-server" package.
  • +
  • #1402710: ls and move hung on disperse volume
  • +
  • #1402730: self-heal not happening, as self-heal info lists the same pending shards to be healed
  • +
  • #1402828: Snapshot: Snapshot create command fails when gluster-shared-storage volume is stopped
  • +
  • #1402841: Files remain unhealed forever if shd is disabled and re-enabled while healing is in progress.
  • +
  • #1403130: [GANESHA] Adding a node to cluster failed to allocate resource-agents to new node.
  • +
  • #1403780: Incorrect incrementation of volinfo refcnt during volume start
  • +
  • #1404118: Snapshot: After snapshot restore failure , snapshot goes into inconsistent state
  • +
  • #1404168: Upcall: Possible use after free when log level set to TRACE
  • +
  • #1404181: [Ganesha+SSL] : Ganesha crashes on all nodes on volume restarts
  • +
  • #1404410: [Perf] : pcs cluster resources went into stopped state during Multithreaded perf tests on RHGS layered over RHEL 6
  • +
  • #1404573: tests/bugs/snapshot/bug-1316437.t test is causing spurious failure
  • +
  • #1404678: [geo-rep]: Config commands fail when the status is 'Created'
  • +
  • #1404905: DHT : file rename operation is successful but log has error 'key:trusted.glusterfs.dht.linkto error:File exists' , 'setting xattrs on failed (File exists)'
  • +
  • #1405165: Allow user to disable mem-pool
  • +
  • #1405301: Fix the failure in tests/basic/gfapi/bug1291259.t
  • +
  • #1405478: Keepalive should be set for IPv6 & IPv4
  • +
  • #1405554: Fix spurious failure in bug-1402841.t-mt-dir-scan-race.t
  • +
  • #1405775: GlusterFS process crashed after add-brick
  • +
  • #1405902: Fix spurious failure in tests/bugs/replicate/bug-1402730.t
  • +
  • #1406224: VM pauses due to storage I/O error, when one of the data brick is down with arbiter/replica volume
  • +
  • #1406249: [GANESHA] Deleting a node from ganesha cluster deletes the volume entry from /etc/ganesha/ganesha.conf file
  • +
  • #1406252: Free xdr-allocated compound request and response arrays
  • +
  • #1406348: [Eventing]: POSIX_SAME_GFID event seen for .trashcan folder and .trashcan/internal_op
  • +
  • #1406410: [GANESHA] Adding node to ganesha cluster is not assigning the correct VIP to the new node
  • +
  • #1406411: Fail add-brick command if replica count changes
  • +
  • #1406878: ec prove tests fail in FB build environment.
  • +
  • #1408115: Remove-brick rebalance failed while rm -rf is in progress
  • +
  • #1408131: Remove tests/distaf
  • +
  • #1408395: [Arbiter] After Killing a brick writes drastically slow down
  • +
  • #1408712: with granular-entry-self-heal enabled i see that there is a gfid mismatch and vm goes to paused state after migrating to another host
  • +
  • #1408755: Remove tests/basic/rpm.t
  • +
  • #1408757: Fix failure of split-brain-favorite-child-policy.t in CentOS7
  • +
  • #1408758: tests/bugs/glusterd/bug-913555.t fails spuriously
  • +
  • #1409078: RFE: Need a command to check op-version compatibility of clients
  • +
  • #1409186: Dict_t leak in dht_migration_complete_check_task and dht_rebalance_inprogress_task
  • +
  • #1409202: Warning messages throwing when EC volume offline brick comes up are difficult to understand for end user.
  • +
  • #1409206: Extra lookup/fstats are sent over the network when a brick is down.
  • +
  • #1409727: [ganesha + EC]posix compliance rename tests failed on EC volume with nfs-ganesha mount.
  • +
  • #1409730: [ganesha+ec]: Contents of original file are not seen when hardlink is created
  • +
  • #1410071: [Geo-rep] Geo replication status detail without master and slave volume args
  • +
  • #1410313: brick crashed on systemic setup
  • +
  • #1410355: Remove-brick rebalance failed while rm -rf is in progress
  • +
  • #1410375: [Mdcache] clients being served wrong information about a file, can lead to file inconsistency
  • +
  • #1410777: ganesha service crashed on all nodes of ganesha cluster on disperse volume when doing lookup while copying files remotely using scp
  • +
  • #1410853: glusterfs-server should depend on firewalld-filesystem
  • +
  • #1411607: [Geo-rep] If for some reason MKDIR failed to sync, it should not proceed further.
  • +
  • #1411625: Spurious split-brain error messages are seen in rebalance logs
  • +
  • #1411999: URL to Fedora distgit no longer uptodate
  • +
  • #1412002: Examples/getvolfile.py is not pep8 compliant
  • +
  • #1412069: No rollback of renames on succeeded subvols during failure
  • +
  • #1412174: Memory leak on mount/fuse when setxattr fails
  • +
  • #1412467: Remove tests/bugs/distribute/bug-1063230.t
  • +
  • #1412489: Upcall: Possible memleak if inode_ctx_set fails
  • +
  • #1412689: [Geo-rep] Slave mount log file is cluttered by logs of multiple active mounts
  • +
  • #1412917: OOM kill of glusterfsd during continuous add-bricks
  • +
  • #1412918: fuse: Resource leak in fuse-helper under GF_SOLARIS_HOST_OS
  • +
  • #1413967: geo-rep session faulty with ChangelogException "No such file or directory"
  • +
  • #1415226: packaging: python/python2(/python3) cleanup
  • +
  • #1415245: core: max op version
  • +
  • #1415279: libgfapi: remove/revert glfs_ipc() changes targeted for 4.0
  • +
  • #1415581: RFE : Create trash directory only when its is enabled
  • +
  • #1415915: RFE: An administrator friendly way to determine rebalance completion time
  • +
  • #1415918: Cache security.ima xattrs as well
  • +
  • #1416285: EXPECT_WITHIN is taking too much time even if the result matches with expected value
  • +
  • #1416416: Improve output of "gluster volume status detail"
  • +
  • #1417027: option performance.parallel-readdir should honor cluster.readdir-optimize
  • +
  • #1417028: option performance.parallel-readdir can cause OOM in large volumes
  • +
  • #1417042: glusterd restart is starting the offline shd daemon on other node in the cluster
  • +
  • #1417135: [Stress] : SHD Logs flooded with "Heal Failed" messages,filling up "/" quickly
  • +
  • #1417521: [SNAPSHOT] With all USS plugin enable .snaps directory is not visible in cifs mount as well as windows mount
  • +
  • #1417527: glusterfind: After glusterfind pre command execution all temporary files and directories /usr/var/lib/misc/glusterfsd/glusterfind/// should be removed
  • +
  • #1417804: debug/trace: Print iatts of individual entries in readdirp callback for better debugging experience
  • +
  • #1418091: [RFE] Support multiple bricks in one process (multiplexing)
  • +
  • #1418536: Portmap allocates way too much memory (256KB) on stack
  • +
  • #1418541: [Ganesha+SSL] : Bonnie++ hangs during rewrites.
  • +
  • #1418623: client process crashed due to write behind translator
  • +
  • #1418650: Samba crash when mounting a distributed dispersed volume over CIFS
  • +
  • #1418981: Unable to take Statedump for gfapi applications
  • +
  • #1419305: disable client.io-threads on replica volume creation
  • +
  • #1419306: [RFE] Need to have group cli option to set all md-cache options using a single command
  • +
  • #1419503: [SAMBA-SSL] Volume Share hungs when multiple mount & unmount is performed over a windows client on a SSL enabled cluster
  • +
  • #1419696: Fix spurious failure of ec-background-heal.t and tests/bitrot/bug-1373520.t
  • +
  • #1419824: repeated operation failed warnings in gluster mount logs with disperse volume
  • +
  • #1419825: Sequential and Random Writes are off target by 12% and 22% respectively on EC backed volumes over FUSE
  • +
  • #1419846: removing warning related to enum, to let the build take place without errors for 3.10
  • +
  • #1419855: [Remove-brick] Hardlink migration fails with "lookup failed (No such file or directory)" error messages in rebalance logs
  • +
  • #1419868: removing old tier commands under the rebalance commands
  • +
  • #1420606: glusterd is crashed at the time of stop volume
  • +
  • #1420808: Trash feature improperly disabled
  • +
  • #1420810: Massive xlator_t leak in graph-switch code
  • +
  • #1420982: Automatic split brain resolution must check for all the bricks to be up to avoiding serving of inconsistent data(visible on x3 or more)
  • +
  • #1420987: warning messages seen in glusterd logs while setting the volume option
  • +
  • #1420989: when server-quorum is enabled, volume get returns 0 value for server-quorum-ratio
  • +
  • #1420991: Modified volume options not synced once offline nodes comes up.
  • +
  • #1421017: CLI option "--timeout" is accepting non numeric and negative values.
  • +
  • #1421956: Disperse: Fallback to pre-compiled code execution when dynamic code generation fails
  • +
  • #1422350: glustershd process crashed on systemic setup
  • +
  • #1422363: [Replicate] "RPC call decoding failed" leading to IO hang & mount inaccessible
  • +
  • #1422391: Gluster NFS server crashing in __mnt3svc_umountall
  • +
  • #1422766: Entry heal messages in glustershd.log while no entries shown in heal info
  • +
  • #1422777: DHT doesn't evenly balance files on FreeBSD with ZFS
  • +
  • #1422819: [Geo-rep] Recreating geo-rep session with same slave after deleting with reset-sync-time fails to sync
  • +
  • #1422942: Prevent reverse heal from happening
  • +
  • #1423063: glusterfs-fuse RPM now depends on gfapi
  • +
  • #1423070: Bricks not coming up when ran with address sanitizer
  • +
  • #1423385: Crash in index xlator because of race in inode_ctx_set and inode_ref
  • +
  • #1423406: Need to improve remove-brick failure message when the brick process is down.
  • +
  • #1423412: Mount of older client fails
  • +
  • #1423429: unnecessary logging in rda_opendir
  • +
  • #1424921: dht_setxattr returns EINVAL when a file is deleted during the FOP
  • +
  • #1424931: [RFE] Include few more options in virt file
  • +
  • #1424937: multiple glusterfsd process crashed making the complete subvolume unavailable
  • +
  • #1424973: remove-brick status shows 0 rebalanced files
  • +
  • #1425556: glusterd log is flooded with stale disconnect rpc messages
  • +
\ No newline at end of file
diff --git a/release-notes/3.10.1/index.html b/release-notes/3.10.1/index.html
new file mode 100644
index 00000000..4d64df2d
--- /dev/null
+++ b/release-notes/3.10.1/index.html
@@ -0,0 +1,4558 @@

Release notes for Gluster 3.10.1

+

This is a bugfix release. The release notes for 3.10.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+
    +
  1. The auth-allow setting was broken with the 3.10 release and is now fixed (#1429117)
  2. +
+

Major issues

+
    +
  1. Expanding a gluster volume that is sharded may cause file corruption
  2. +
  3. Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
  4. +
  5. If you are using sharded volumes, DO NOT rebalance them until this is fixed.
  6. +
  7. Status of this bug can be tracked here: #1426508
  8. +
+

Bugs addressed

+

A total of 31 patches have been merged, addressing 26 bugs:

+
    +
  • #1419824: repeated operation failed warnings in gluster mount logs with disperse volume
  • +
  • #1422769: brick process crashes when glusterd is restarted
  • +
  • #1422781: Transport endpoint not connected error seen on client when glusterd is restarted
  • +
  • #1426222: build: fixes to build 3.9.0rc2 on Debian (jessie)
  • +
  • #1426323: common-ha: no need to remove nodes one-by-one in teardown
  • +
  • #1426329: [Ganesha] : Add comment to Ganesha HA config file ,about cluster name's length limitation
  • +
  • #1427387: systemic testing: seeing lot of ping time outs which would lead to splitbrains
  • +
  • #1427399: [RFE] capture portmap details in glusterd's statedump
  • +
  • #1427461: Bricks take up new ports upon volume restart after add-brick op with brick mux enabled
  • +
  • #1428670: Disconnects in nfs mount leads to IO hang and mount inaccessible
  • +
  • #1428739: Fix crash in dht resulting from tests/features/nuke.t
  • +
  • #1429117: auth failure after upgrade to GlusterFS 3.10
  • +
  • #1429402: Restore atime/mtime for symlinks and other non-regular files.
  • +
  • #1429773: disallow increasing replica count for arbiter volumes
  • +
  • #1430512: /libgfxdr.so.0.0.1: undefined symbol: __gf_free
  • +
  • #1430844: build/packaging: Debian and Ubuntu don't have /usr/libexec/; results in bad packages
  • +
  • #1431175: volume start command hangs
  • +
  • #1431176: USS is broken when multiplexing is on
  • +
  • #1431591: memory leak in features/locks xlator
  • +
  • #1434296: [Disperse] Metadata version is not healing when a brick is down
  • +
  • #1434303: Move spit-brain msg in read txn to debug
  • +
  • #1434399: glusterd crashes when peering an IP where the address is more than acceptable range (>255) OR with random hostnames
  • +
  • #1435946: When parallel readdir is enabled and there are simultaneous readdir and disconnects, then it results in crash
  • +
  • #1436203: Undo pending xattrs only on the up bricks
  • +
  • #1436411: Unrecognized filesystems (i.e. btrfs, zfs) log many errors about "getinode size"
  • +
  • #1437326: Sharding: Fix a performance bug
  • +
\ No newline at end of file
diff --git a/release-notes/3.10.10/index.html b/release-notes/3.10.10/index.html
new file mode 100644
index 00000000..0472230c
--- /dev/null
+++ b/release-notes/3.10.10/index.html
@@ -0,0 +1,4528 @@

Release notes for Gluster 3.10.10

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6, 3.10.7, 3.10.8 and 3.10.9 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+
    +
  1. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
  2. +
+

Bugs addressed

+

Bugs addressed since release-3.10.9 are listed below.

+
    +
  • #1498081: dht_(f)xattrop does not implement migration checks
  • +
  • #1534848: entries not getting cleared post healing of softlinks (stale entries showing up in heal info)
  • +
\ No newline at end of file
diff --git a/release-notes/3.10.11/index.html b/release-notes/3.10.11/index.html
new file mode 100644
index 00000000..c3abdf77
--- /dev/null
+++ b/release-notes/3.10.11/index.html
@@ -0,0 +1,4531 @@

Release notes for Gluster 3.10.11

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6, 3.10.7, 3.10.8, 3.10.9 and 3.10.10 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+
    +
  1. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
  2. +
+

Bugs addressed

+

Bugs addressed since release-3.10.10 are listed below.

+
    +
  • #1486542: "ganesha.so cannot open" warning message in glusterd log in non ganesha setup.
  • +
  • #1544461: 3.8 -> 3.10 rolling upgrade fails (same for 3.12 or 3.13) on Ubuntu 14
  • +
  • #1544787: tests/bugs/cli/bug-1169302.t fails spuriously
  • +
  • #1546912: tests/bugs/posix/bug-990028.t fails in release-3.10 branch
  • +
  • #1549482: Quota: After deleting directory from mount point on which quota was configured, quota list command output is blank
  • +
\ No newline at end of file
diff --git a/release-notes/3.10.12/index.html b/release-notes/3.10.12/index.html
new file mode 100644
index 00000000..6d9c6487
--- /dev/null
+++ b/release-notes/3.10.12/index.html
@@ -0,0 +1,4545 @@

Release notes for Gluster 3.10.12

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6, 3.10.7, 3.10.8, 3.10.9, 3.10.10 and 3.10.11 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+

This release contains a fix for a security vulnerability in Gluster, as follows:

+
    +
  • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1088
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-1088
  • +
+

Installing the updated packages and restarting gluster services will update the Gluster shared storage volume volfiles, which are more secure than the defaults currently in place.
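
On a systemd-based node, one way this is typically done after the package update (assuming the standard glusterd unit name) is:

# systemctl restart glusterd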

+

Further, for increased security, the Gluster shared storage volume can be TLS-enabled, and access to it restricted using the auth.ssl-allow option. See this guide for more details.
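
As a minimal sketch, assuming TLS certificates have already been deployed on all nodes as described in the SSL guide, and with server1,server2,server3 standing in for the actual hostnames of the trusted storage pool, the shared storage volume could be locked down like this (the volume typically needs to be restarted for the SSL options to take effect):

# gluster volume set gluster_shared_storage client.ssl on
# gluster volume set gluster_shared_storage server.ssl on
# gluster volume set gluster_shared_storage auth.ssl-allow 'server1,server2,server3'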

+

Major issues

+
    +
  1. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
  2. +
+

Bugs addressed

+

Bugs addressed since release-3.10.11 are listed below.

+
    +
  • #1553777: /var/log/glusterfs/bricks/export_vdb.log flooded with this error message "Not able to add to index [Too many links]"
  • +
  • #1555195: [Ganesha] Duplicate volume export entries in ganesha.conf causing volume unexport to fail
  • +
  • #1555203: After a replace brick command, self-heal takes some time to start healing files on disperse volumes
  • +
  • #1557304: [Glusterd] Volume operations fail on a (tiered) volume because of a stale lock held by one of the nodes
  • +
  • #1559352: [Ganesha] : Ganesha crashes while cluster enters failover/failback mode
  • +
  • #1561732: Rebalance failures on a dispersed volume with lookup-optimize enabled
  • +
  • #1563500: nfs-ganesha: in case pcs cluster setup fails then nfs-ganesha process should not start
  • +
  • #1569409: EIO errors on some operations when volume has mixed brick versions on a disperse volume
  • +
  • #1570428: CVE-2018-1088 glusterfs: Privilege escalation via gluster_shared_storage when snapshot scheduling is enabled [fedora-all]
  • +
\ No newline at end of file
diff --git a/release-notes/3.10.2/index.html b/release-notes/3.10.2/index.html
new file mode 100644
index 00000000..aac82eed
--- /dev/null
+++ b/release-notes/3.10.2/index.html
@@ -0,0 +1,4583 @@

Release notes for Gluster 3.10.2

+

This is a bugfix release. The release notes for 3.10.0 and 3.10.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+
    +
  1. Many brick multiplexing and nfs-ganesha+HA bugs have been addressed.
  2. +
  3. Rebalance and remove-brick operations have been disabled for sharded volumes to prevent data corruption.
  4. +
+

Major issues

+
    +
  1. +

    Expanding a gluster volume that is sharded may cause file corruption

    +
  2. +
  3. +

    Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.

    +
  4. +
  5. Status of this bug can be tracked here, #1426508
  6. +
+

Bugs addressed

+

A total of 63 patches have been merged, addressing 46 bugs:

+
    +
  • #1437854: Spellcheck issues reported during Debian build
  • +
  • #1425726: Stale export entries in ganesha.conf after executing "gluster nfs-ganesha disable"
  • +
  • #1427079: [Ganesha] : unexport fails if export configuration file is not present
  • +
  • #1440148: common-ha (debian/ubuntu): ganesha-ha.sh has a hard-coded /usr/libexec/ganesha...
  • +
  • #1443478: RFE: Support to update NFS-Ganesha export options dynamically
  • +
  • #1443490: [Nfs-ganesha] Refresh config fails when ganesha cluster is in failover mode.
  • +
  • #1441474: synclocks don't work correctly under contention
  • +
  • #1449002: [Brick Multiplexing] : Bricks for multiple volumes going down after glusterd restart and not coming back up after volume start force
  • +
  • #1438813: Segmentation fault when creating a qcow2 with qemu-img
  • +
  • #1438423: [Ganesha + EC] : Input/Output Error while creating LOTS of smallfiles
  • +
  • #1444540: rm -rf <dir> returns ENOTEMPTY even though ls on the mount point returns no files
  • +
  • #1446227: Incorrect and redundant logs in the DHT rmdir code path
  • +
  • #1447608: Don't allow rebalance/fix-layout operation on sharding enabled volumes till dht+sharding bugs are fixed
  • +
  • #1448864: Seeing error "Failed to get the total number of files. Unable to estimate time to complete rebalance" in rebalance logs
  • +
  • #1443349: [Eventing]: Unrelated error message displayed when path specified during a 'webhook-test/add' is missing a schema
  • +
  • #1441576: [geo-rep]: rsync should not try to sync internal xattrs
  • +
  • #1441927: [geo-rep]: Worker crashes with [Errno 16] Device or resource busy: '.gfid/00000000-0000-0000-0000-000000000001/dir.166 while renaming directories
  • +
  • #1401877: [GANESHA] Symlinks from /etc/ganesha/ganesha.conf to shared_storage are created on the non-ganesha nodes in 8 node gluster having 4 node ganesha cluster
  • +
  • #1425723: nfs-ganesha volume export file remains stale in shared_storage_volume when volume is deleted
  • +
  • #1427759: nfs-ganesha: Incorrect error message returned when disable fails
  • +
  • #1438325: Need to improve remove-brick failure message when the brick process is down.
  • +
  • #1438338: glusterd is setting replicate volume property over disperse volume or vice versa
  • +
  • #1438340: glusterd is not validating for allowed values while setting "cluster.brick-multiplex" property
  • +
  • #1441476: Glusterd crashes when restarted with many volumes
  • +
  • #1444128: [BrickMultiplex] gluster command not responding and .snaps directory is not visible after executing snapshot related command
  • +
  • #1445260: [GANESHA] Volume start and stop having ganesha enable on it,turns off cache-invalidation on volume
  • +
  • #1445408: gluster volume stop hangs
  • +
  • #1449934: Brick Multiplexing :- resetting a brick bring down other bricks with same PID
  • +
  • #1435779: Inode ref leak on anonymous reads and writes
  • +
  • #1440278: [GSS] NFS Sub-directory mount not working on solaris10 client
  • +
  • #1450378: GNFS crashed while taking lock on a file from 2 different clients having same volume mounted from 2 different servers
  • +
  • #1449779: quota: limit-usage command failed with error " Failed to start aux mount"
  • +
  • #1450564: glfsheal: crashed(segfault) with disperse volume in RDMA
  • +
  • #1443501: Don't wind post-op on a brick where the fop phase failed.
  • +
  • #1444892: When either killing or restarting a brick with performance.stat-prefetch on, stat sometimes returns a bad st_size value.
  • +
  • #1449169: Multiple bricks WILL crash after TCP port probing
  • +
  • #1440805: Update rfc.sh to check Change-Id consistency for backports
  • +
  • #1443010: snapshot: snapshots appear to be failing with respect to secure geo-rep slave
  • +
  • #1445209: snapshot: Unable to take snapshot on a geo-replicated volume, even after stopping the session
  • +
  • #1444773: explicitly specify executor to be bash for tests
  • +
  • #1445407: remove bug-1421590-brick-mux-reuse-ports.t
  • +
  • #1440742: Test files clean up for tier during 3.10
  • +
  • #1448790: [Tiering]: High and low watermark values when set to the same level, is allowed
  • +
  • #1435942: Enabling parallel-readdir causes dht linkto files to be visible on the mount,
  • +
  • #1437763: File-level WORM allows ftruncate() on read-only files
  • +
  • #1439148: Parallel readdir on Gluster NFS displays less number of dentries
  • +
diff --git a/release-notes/3.10.3/index.html b/release-notes/3.10.3/index.html

Release notes for Gluster 3.10.3

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1 and 3.10.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+
    +
  1. No Major changes
+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption
    • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
    • Status of this bug can be tracked here, #1426508
  2. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
+

Bugs addressed

+

A total of 18 patches have been merged, addressing 13 bugs:

+
    +
  • #1450053: [GANESHA] Adding a node to existing cluster failed to start pacemaker service on new node
  • +
  • #1450773: Quota: After upgrade from 3.7 to higher version , gluster quota list command shows "No quota configured on volume repvol"
  • +
  • #1450934: [New] - Replacing an arbiter brick while I/O happens causes vm pause
  • +
  • #1450947: Autoconf leaves unexpanded variables in path names of non-shell-script text files
  • +
  • #1451371: crash in dht_rmdir_do
  • +
  • #1451561: AFR returns the node uuid of the same node for every file in the replica
  • +
  • #1451587: cli xml status of detach tier broken
  • +
  • #1451977: Add logs to identify whether disconnects are voluntary or due to network problems
  • +
  • #1451995: Log message shows error code as success even when rpc fails to connect
  • +
  • #1453056: [DHt] : segfault in dht_selfheal_dir_setattr while running regressions
  • +
  • #1453087: Brick Multiplexing: On reboot of a node Brick multiplexing feature lost on that node as multiple brick processes get spawned
  • +
  • #1456682: tierd listens to a port.
  • +
  • #1457054: glusterfs client crash on io-cache.so(__ioc_page_wakeup+0x44)
  • +
diff --git a/release-notes/3.10.4/index.html b/release-notes/3.10.4/index.html

Release notes for Gluster 3.10.4

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2 and 3.10.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+
    +
  1. No Major changes
+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption
    • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
    • Status of this bug can be tracked here, #1426508
  2. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
  3. Another rebalance related bug is being worked upon #1467010
+

Bugs addressed

+

A total of 18 patches have been merged, addressing 13 bugs:

+
    +
  • #1457732: "split-brain observed [Input/output error]" error messages in samba logs during parallel rm -rf
  • +
  • #1459760: Glusterd segmentation fault in ' _Unwind_Backtrace' while running peer probe
  • +
  • #1460649: posix-acl: Whitelist virtual ACL xattrs
  • +
  • #1460914: Rebalance estimate time sometimes shows negative values
  • +
  • #1460993: Revert CLI restrictions on running rebalance in VM store use case
  • +
  • #1461019: [Ganesha] : Grace period is not being adhered to on RHEL 7.4; Clients continue running IO even during grace.
  • +
  • #1462080: [Bitrot]: Inconsistency seen with 'scrub ondemand' - fails to trigger scrub
  • +
  • #1463623: [Ganesha]Bricks got crashed while running posix compliance test suit on V4 mount
  • +
  • #1463641: [Ganesha] Ganesha service failed to start on new node added in existing ganesha cluster
  • +
  • #1464078: with AFR now making both nodes to return UUID for a file will result in georep consuming more resources
  • +
  • #1466852: assorted typos and spelling mistakes from Debian lintian
  • +
  • #1466863: dht_rename_lock_cbk crashes in upstream regression test
  • +
  • #1467269: Heal info shows incorrect status
  • +
diff --git a/release-notes/3.10.5/index.html b/release-notes/3.10.5/index.html

Release notes for Gluster 3.10.5

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3 and 3.10.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption
    • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
    • The last known cause for corruption, #1467010, has a fix with this release. As further testing is still in progress, the issue is retained as a major issue.
  2. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
+

Bugs addressed

+

Bugs addressed since release-3.10.4 are listed below.

+
    +
  • #1467010: Fd based fops fail with EBADF on file migration
  • +
  • #1468126: disperse seek does not correctly handle the end of file
  • +
  • #1468198: [Geo-rep]: entry failed to sync to slave with ENOENT errror
  • +
  • #1470040: packaging: Upgrade glusterfs-ganesha sometimes fails to semanage ganesha_use_fusefs
  • +
  • #1470488: gluster volume status --xml fails when there are 100 volumes
  • +
  • #1471028: glusterfs process leaking memory when error occurs
  • +
  • #1471612: metadata heal not happening despite having an active sink
  • +
  • #1471870: cthon04 can cause segfault in gNFS/NLM
  • +
  • #1471917: [GANESHA] Ganesha setup creation fails due to selinux blocking some services required for setup creation
  • +
  • #1472446: packaging: save ganesha config files in (/var)/run/gluster/shared_storage/nfs-ganesha
  • +
  • #1473129: dht/rebalance: Improve rebalance crawl performance
  • +
  • #1473132: dht/cluster: rebalance/remove-brick should honor min-free-disk
  • +
  • #1473133: dht/cluster: rebalance/remove-brick should honor min-free-disk
  • +
  • #1473134: The rebal-throttle setting does not work as expected
  • +
  • #1473136: rebalance: Allow admin to change thread count for rebalance
  • +
  • #1473137: dht: Make throttle option "normal" value uniform across dht_init and dht_reconfigure
  • +
  • #1473140: Fix on demand file migration from client
  • +
  • #1473141: cluster/dht: Fix hardlink migration failures
  • +
  • #1475638: [Scale] : Client logs flooded with "inode context is NULL" error messages
  • +
  • #1476212: [geo-rep]: few of the self healed hardlinks on master did not sync to slave
  • +
  • #1478498: scripts: invalid test in S32gluster_enable_shared_storage.sh
  • +
  • #1478499: packaging: /var/lib/glusterd/options should be %config(noreplace)
  • +
  • #1480594: nfs process crashed in "nfs3_getattr"
  • +
diff --git a/release-notes/3.10.6/index.html b/release-notes/3.10.6/index.html

Release notes for Gluster 3.10.6

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4 and 3.10.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption
    • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
    • The last known cause for corruption, #1498081, is still pending a fix, which is not yet a part of this release.
  2. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
+

Bugs addressed

+

Bugs addressed since release-3.10.5 are listed below.

+
    +
  • #1467010: Fd based fops fail with EBADF on file migration
  • +
  • #1481394: libgfapi: memory leak in glfs_h_acl_get
  • +
  • #1482857: glusterd fails to start
  • +
  • #1483997: packaging: use rdma-core(-devel) instead of ibverbs, rdmacm; disable rdma on armv7hl
  • +
  • #1484443: packaging: /run and /var/run; prefer /run
  • +
  • #1486542: "ganesha.so cannot open" warning message in glusterd log in non ganesha setup.
  • +
  • #1487042: AFR returns the node uuid of the same node for every file in the replica
  • +
  • #1487647: with AFR now making both nodes to return UUID for a file will result in georep consuming more resources
  • +
  • #1488391: gluster-blockd process crashed and core generated
  • +
  • #1488719: [RHHI] cannot boot vms created from template when disk format = qcow2
  • +
  • #1490909: [Ganesha] : Unable to bring up a Ganesha HA cluster on SELinux disabled machines on latest gluster bits.
  • +
  • #1491166: GlusterD returns a bad memory pointer in glusterd_get_args_from_dict()
  • +
  • #1491691: rpc: TLSv1_2_method() is deprecated in OpenSSL-1.1
  • +
  • #1491966: AFR entry self heal removes a directory's .glusterfs symlink.
  • +
  • #1491985: Add NULL gfid checks before creating file
  • +
  • #1491995: afr: check op_ret value in __afr_selfheal_name_impunge
  • +
  • #1492010: Launch metadata heal in discover code path.
  • +
  • #1495430: Make event-history feature configurable and have it disabled by default
  • +
  • #1496321: [afr] split-brain observed on T files post hardlink and rename in x3 volume
  • +
  • #1497122: Crash in dht_check_and_open_fd_on_subvol_task()
  • +
diff --git a/release-notes/3.10.7/index.html b/release-notes/3.10.7/index.html

Release notes for Gluster 3.10.7

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5 and 3.10.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption
    • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
    • The last known cause for corruption, #1498081, is still pending a fix, which is not yet a part of this release.
  2. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
+

Bugs addressed

+

Bugs addressed since release-3.10.6 are listed below.

+
    +
  • #1480788: File-level WORM allows mv over read-only files
  • +
  • #1491059: PID File handling: brick pid file leaves stale pid and brick fails to start when glusterd is started
  • +
  • #1496321: [afr] split-brain observed on T files post hardlink and rename in x3 volume
  • +
  • #1497990: Gluster 3.10.x Packages require manual systemctl daemon reload after install
  • +
  • #1499890: md-cache uses incorrect xattr keynames for GF_POSIX_ACL keys
  • +
  • #1499893: md-cache: xattr values should not be checked with string functions
  • +
  • #1501955: gfapi: API needed to set lk_owner
  • +
  • #1502928: Mishandling null check at send_brick_req of glusterfsd/src/gf_attach.c
  • +
  • #1503405: Potential use of NULL this variable before it gets initialized
  • +
diff --git a/release-notes/3.10.8/index.html b/release-notes/3.10.8/index.html

Release notes for Gluster 3.10.8

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6 and 3.10.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption
    • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
    • The last known cause for corruption, #1498081, is still pending a fix, which is not yet a part of this release.
  2. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
+

Bugs addressed

+

Bugs addressed since release-3.10.7 are listed below.

+
    +
  • #1507749: clean up port map on brick disconnect
  • +
  • #1507752: Brick port mismatch
  • +
  • #1507880: reset-brick commit force failed with glusterd_volume_brickinfo_get Returning -1
  • +
  • #1508036: Address lstat usage in glusterd-snapshot.c code
  • +
  • #1514388: default timeout of 5min not honored for analyzing split-brain files post setfattr replica.split-brain-heal-finalize
  • +
  • #1514424: gluster volume splitbrain info needs to display output of each brick in a stream fashion instead of buffering and dumping at the end
  • +
  • #1517682: Memory leak in locks xlator
  • +
diff --git a/release-notes/3.10.9/index.html b/release-notes/3.10.9/index.html

Release notes for Gluster 3.10.9

+

This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6, 3.10.7 and 3.10.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption
    • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
    • The last known cause for corruption, #1498081, is still pending a fix, which is not yet a part of this release.
  2. Brick multiplexing is being tested and fixed aggressively, but we still have a few crashes and memory leaks to fix.
+

Bugs addressed

+

Bugs addressed since release-3.10.8 are listed below.

+
    +
  • #1523050: glusterd consuming high memory
  • +
  • #1529086: fstat returns ENOENT/ESTALE
  • +
  • #1529089: opening a file that is destination of rename results in ENOENT errors
  • +
  • #1529096: /usr/sbin/glusterfs crashing on Red Hat OpenShift Container Platform node
  • +
  • #1530341: [snapshot cifs]ls on .snaps directory is throwing input/output error over cifs mount
  • +
  • #1530450: glustershd fails to start on a volume force start after a brick is down
  • +
diff --git a/release-notes/3.11.0/index.html b/release-notes/3.11.0/index.html

Release notes for Gluster 3.11.0

+

This is a major Gluster release that includes some substantial changes. The features revolve around improvements to small file workloads, SELinux support, a Halo replication enhancement from Facebook, and some usability and performance improvements, among other bug fixes.

+

The most notable features and changes are documented on this page. A full list +of bugs that have been addressed is included further below.

+

Major changes and features

+

Switched to storhaug for ganesha and samba high availability

+

Notes for users:

+

High Availability (HA) support for NFS-Ganesha (NFS) and Samba (SMB) +is managed by Storhaug. Like the old HA implementation, Storhaug uses +Pacemaker and Corosync to manage Virtual (floating) IP addresses (VIPs) +and fencing. See https://github.com/linux-ha-storage/storhaug.

+

Storhaug packages are available in Fedora and for several popular +Linux distributions from https://download.gluster.org/pub/gluster/storhaug/

+

Note: Storhaug does not dictate which fencing solution should be used. +There are many to choose from in most popular Linux distributions. +Choose the one the best fits your environment and use it.

+

Added SELinux support for Gluster Volumes

+

Notes for users:

+

A new xlator has been introduced (features/selinux) to allow setting the +extended attribute (security.selinux) that is needed to support SELinux on +Gluster volumes. The current ability to enforce the SELinux policy on the +Gluster Storage servers prevents setting the extended attribute for use on the +client side. The new translator converts the client-side SELinux extended +attribute to a Gluster internal representation (the trusted.glusterfs.selinux +extended attribute) to prevent problems.

+

This feature is intended to be the base for implementing Labelled-NFS in +NFS-Ganesha and SELinux support for FUSE mounts in the Linux kernel.

+

Limitations:

+
    +
  • The Linux kernel does not support mounting of FUSE filesystems with SELinux + support, yet.
  • +
  • NFS-Ganesha does not support Labelled-NFS, yet.
  • +
+

Known Issues:

+
    +
  • There has been limited testing, because other projects can not consume the + functionality yet without being part of a release. So far, no problems have + been observed, but this might change when other projects start to seriously + use this.
  • +
+

Several memory leaks are fixed in gfapi during graph switches

+

Notes for users:

+

Gluster API (or gfapi), has had a few memory leak issues arising specifically +during changes to volume graphs (volume topology or options). A few of these are +addressed in this release, and more work towards ironing out the pending leaks +are in the works across the next few releases.

+

Limitations:

+
    +
  • There are still a few leaks to be addressed when graph switches occur
  • +
get-state CLI is enhanced to provide client and brick capacity related information

Notes for users:

+

The get-state CLI output now optionally accommodates client related information corresponding to locally running bricks, as obtained from gluster volume status <volname>|all clients. Getting the client details is a relatively more costly operation, so these details will only be added to the output if the get-state command is invoked with the 'detail' option. The following is the updated usage for the get-state command:

+
 # gluster get-state [<daemon>] [[odir </path/to/output/dir/>] [file <filename>]] [detail]
+
+

Other than client details, capacity related information for respective local +bricks as obtained from gluster volume status <volname>|all detail has also +been added to the get-state output.
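For illustration, an invocation that captures the client and capacity details into a named state file might look like the following (the output directory and file name are arbitrary examples, not defaults):

# gluster get-state glusterd odir /var/run/gluster/ file glusterd-state-with-details detail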

+

Limitations:

+
    +
  • Information for non-local bricks and clients connected to non-local bricks + won't be available. This is a known limitation of the get-state command, since + get-state command doesn't provide information on non-local bricks.
  • +
+

Ability to serve negative lookups from cache has been added

+

Notes for users:

+

Before creating/renaming any file, lookups (around 5-6 when using the SMB protocol) are sent to verify if the file already exists. The negative lookup cache serves these lookups from the cache when possible, thus increasing the create/rename performance when using SMB based access to a gluster volume.

+

Execute the following commands to enable negative-lookup cache:

+
# gluster volume set <volname> features.cache-invalidation on
# gluster volume set <volname> features.cache-invalidation-timeout 600
# gluster volume set <volname> nl-cache on
+
+

Limitations

+
    +
  • This feature is supported only for SMB access, for this release
  • +
+

New xlator to help developers detecting resource leaks has been added

+

Notes for users:

+

This is intended as a developer feature, and hence there is no direct user +impact.

+

For developers, the sink xlator provides ways to help detect memory leaks in +gfapi and any xlator in between the API and the sink xlator.

+

More details can be found in this thread on the gluster-devel lists

+

Feature for metadata-caching/small file performance is production ready

+

Notes for users:

+

Over the course of releases several fixes and enhancements have been made to +the mdcache xlator, to improve performance of small file workloads. As a +result, with this release we are announcing this feature to be production ready.

+

In order to improve the performance of directory operations of Gluster volumes, +the maximum metadata (stat, xattr) caching time on the client side is increased +to 10 minutes, without compromising on the consistency of the cache. Significant +performance improvements can be achieved in the following workloads on FUSE and +SMB access, by enabling metadata caching:

+
    +
  • Listing of directories (recursive)
  • +
  • Creating files
  • +
  • Deleting files
  • +
  • Renaming files
  • +
+

To enable metadata caching execute the following commands:

+
# gluster volume set <volname> group metadata-cache
# gluster volume set <volname> network.inode-lru-limit <n>
+
+

<n> is set to 50000 by default. It should be increased if the number of concurrently accessed files in the volume is very high. Increasing this number increases the memory footprint of the brick processes.
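As a small worked example, a volume with a large set of concurrently accessed files might have the limit raised along these lines (the value is purely illustrative, not a recommendation):

# gluster volume set <volname> network.inode-lru-limit 200000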

+

"Parallel Readdir" feature introduced in 3.10.0 is production ready

+

Notes for users:

+

This feature was introduced in 3.10 and was experimental in nature. Over the +course of 3.10 minor releases and 3.11.0 release, this feature has been +stabilized and is ready for use in production environments.

+

For further details refer: 3.10.0 release notes

+

Object versioning is enabled only if bitrot is enabled

+

Notes for users:

+

Object versioning was turned on by default on brick processes by the bitrot xlator. This caused setting and looking up of additional extended attributes on the backend file system for every object, even when not actively using bitrot. This at times caused high CPU utilization on the brick processes.

+

To fix this, object versioning is disabled by default, and is only enabled as +a part of enabling the bitrot option.
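For reference, bitrot detection, and with it object versioning, is enabled per volume through the bitrot CLI; a sketch of the command (the volume name is a placeholder) is:

# gluster volume bitrot <volname> enable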

+

Distribute layer provides more robust transactions during directory namespace operations

+

Notes for users:

+

Distribute layer in Gluster, creates and maintains directories in all subvolumes +and as a result operations involving creation/manipulation/deletion of these +directories needed better transaction support to ensure consistency of the +file system.

+

This transaction support is now implemented in the distribute layer, thus +ensuring better consistency of the file system as a whole, when dealing with +racing operations, operating on the same directory object.

+

gfapi extended readdirplus API has been added

+

Notes for users:

+

An extended readdirplus API glfs_xreaddirplus is added to get extra +information along with readdirplus results on demand. This is useful for the +applications (like NFS-Ganesha which needs handles) to retrieve more information +along with stat in a single call, thus improving performance of work-loads +involving directory listing.

+

The API syntax and usage can be found in glfs.h header file.

+

Limitations:

+
    +
  • This API currently has support to only return stat and handles (glfs_object) + for each dirent of the directory, but can be extended in the future.
  • +
+

Improved adoption of standard refcounting functions across the code

+

Notes for users:

+

This change does not impact users, it is an internal code cleanup activity +that ensures that we ref count in a standard manner, thus avoiding unwanted +bugs due to different implementations of the same.

+

Known Issues:

+
    +
  • This standardization started with this release and is expected to continue + across releases.
  • +
+

Performance improvements to rebalance have been made

+

Notes for users:

+

Both crawling and migration improvements have been made in rebalance. The crawler is now optimized to split the migration load across replica and EC nodes. Prior to this change, if the replicating bricks were distributed over two nodes, only one node used to do the migration. With the new optimization both nodes divide the load among each other, giving a boost to migration performance. There have also been some optimizations to avoid redundant network operations (or RPC calls) in the process of migrating a file.

+

Further, file migration now avoids the syncop framework and is managed entirely by rebalance threads, giving a performance boost.

+

Also, there is a change to the throttle settings in rebalance. Earlier, users could set one of three values, "lazy", "normal" or "aggressive", which was not flexible enough. To overcome that, number based throttle settings have been introduced. Users can now set a number, which indicates the number of threads the rebalance process will work with, thereby translating to the number of files being migrated in parallel.

+

Halo Replication feature in AFR has been introduced

+

Notes for users:

+

Halo Geo-replication is a feature which allows Gluster or NFS clients to write locally to their region (as defined by a latency "halo" or threshold, if you like), and have their writes asynchronously propagate from their origin to the rest of the cluster. Clients can also write synchronously to the cluster simply by specifying a halo-latency which is very large (e.g. 10 seconds), which will include all bricks. To enable the halo feature, execute the following command:

+
# gluster volume set <volname> cluster.halo-enabled yes
+
+

You may have to set the following options to change the defaults.
cluster.halo-shd-latency: The threshold below which self-heal daemons will consider children (bricks) connected.

+

cluster.halo-nfsd-latency: The threshold below which NFS daemons will consider +children (bricks) connected.

+

cluster.halo-latency: The threshold below which all other clients will +consider children (bricks) connected.

+

cluster.halo-min-replicas: The minimum number of replicas which are to +be enforced regardless of latency specified in the above 3 options. +If the number of children falls below this threshold the next +best (chosen by latency) shall be swapped in.
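As an illustrative sketch, these tunables are applied with the usual volume set command; the values below are placeholders, not recommendations:

# gluster volume set <volname> cluster.halo-latency 10
# gluster volume set <volname> cluster.halo-min-replicas 2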

+

FALLOCATE support with EC

+

Notes for users

+

Support for FALLOCATE file operation on EC volume is added with this release. +EC volumes can now support basic FALLOCATE functionality.
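To illustrate the newly supported operation, pre-allocating space for a file from a client mount of a disperse volume should now work with the standard fallocate tool (the mount path and size below are placeholders):

# fallocate -l 1G /mnt/ec-volume/preallocated.img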

+

Self-heal window-size control option for EC

+

Notes for users

+

Support to control the maximum size of the read/write operations carried out during the self-heal process has been added with this release. Users can tune the 'disperse.self-heal-window-size' option on a disperse volume to adjust the size.
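A minimal sketch of adjusting this option on a disperse volume follows; the value shown is purely illustrative:

# gluster volume set <volname> disperse.self-heal-window-size 2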

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption
    • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
    • Status of this bug can be tracked here, #1426508
    • The latest series of fixes for the issue (which are present in this release as well) are not showing the previous corruption, and hence the fixes look good, but this is maintained on the watch list nevertheless.
+

Bugs addressed

+

Bugs addressed since release-3.10.0 are listed below.

+
    +
  • #1169302: Unable to take Statedump for gfapi applications
  • +
  • #1197308: do not depend on "killall", use "pkill" instead
  • +
  • #1198849: Minor improvements and cleanup for the build system
  • +
  • #1257792: bug-1238706-daemons-stop-on-peer-cleanup.t fails occasionally
  • +
  • #1261689: geo-replication faulty
  • +
  • #1264849: RFE : Create trash directory only when its is enabled
  • +
  • #1297182: Mounting with "-o noatime" or "-o noexec" causes "nosuid,nodev" to be set as well
  • +
  • #1318100: RFE : SELinux translator to support setting SELinux contexts on files in a glusterfs volume
  • +
  • #1321578: auth.allow and auth.reject not working host mentioned with hostnames/FQDN
  • +
  • #1322145: Glusterd fails to restart after replacing a failed GlusterFS node and a volume has a snapshot
  • +
  • #1326219: Make Gluster/NFS an optional component
  • +
  • #1328342: [tiering]: gluster v reset of watermark levels can allow low watermark level to have a higher value than hi watermark level
  • +
  • #1353952: [geo-rep]: rsync should not try to sync internal xattrs
  • +
  • #1356076: DHT doesn't evenly balance files on FreeBSD with ZFS
  • +
  • #1359599: BitRot :- bit-rot.signature and bit-rot.version xattr should not be set if bitrot is not enabled on volume
  • +
  • #1369393: dead loop in changelog_rpc_server_destroy
  • +
  • #1383893: glusterd restart is starting the offline shd daemon on other node in the cluster
  • +
  • #1384989: libglusterfs : update correct memory segments in glfs-message-id
  • +
  • #1385758: [RFE] Support multiple bricks in one process (multiplexing)
  • +
  • #1386578: mounting with rdma protocol fails for tcp,rdma volumes
  • +
  • #1389127: build: fixes to build 3.9.0rc2 on Debian (jessie)
  • +
  • #1390050: Elasticsearch get CorruptIndexException errors when running with GlusterFS persistent storage
  • +
  • #1393338: Rebalance should skip the file if the file has hardlinks instead of failing
  • +
  • #1395643: [SELinux] [Scheduler]: Unable to create Snapshots on RHEL-7.1 using Scheduler
  • +
  • #1396004: RFE: An administrator friendly way to determine rebalance completion time
  • +
  • #1399196: use attribute(format(printf)) to catch format string errors at compile time
  • +
  • #1399593: Obvious typo in cleanup code in rpc_clnt_notify
  • +
  • #1401571: bitrot quarantine dir misspelled
  • +
  • #1401812: RFE: Make readdirp parallel in dht
  • +
  • #1401877: [GANESHA] Symlinks from /etc/ganesha/ganesha.conf to shared_storage are created on the non-ganesha nodes in 8 node gluster having 4 node ganesha cluster
  • +
  • #1402254: compile warning unused variable
  • +
  • #1402661: Samba crash when mounting a distributed dispersed volume over CIFS
  • +
  • #1404424: The data-self-heal option is not honored in AFR
  • +
  • #1405628: Socket search code at startup is slow
  • +
  • #1408809: [Perf] : significant Performance regression seen with disperse volume when compared with 3.1.3
  • +
  • #1409191: Sequential and Random Writes are off target by 12% and 22% respectively on EC backed volumes over FUSE
  • +
  • #1410425: [GNFS+EC] Cthon failures/issues with Lock/Special Test cases on disperse volume with GNFS mount
  • +
  • #1410701: [SAMBA-SSL] Volume Share hungs when multiple mount & unmount is performed over a windows client on a SSL enabled cluster
  • +
  • #1411228: remove-brick status shows 0 rebalanced files
  • +
  • #1411334: Improve output of "gluster volume status detail"
  • +
  • #1412135: rename of the same file from multiple clients with caching enabled may result in duplicate files
  • +
  • #1412549: EXPECT_WITHIN is taking too much time even if the result matches with expected value
  • +
  • #1413526: glusterfind: After glusterfind pre command execution all temporary files and directories /usr/var/lib/misc/glusterfsd/glusterfind/// should be removed
  • +
  • #1413971: Bonnie test suite failed with "Can't open file" error
  • +
  • #1414287: repeated operation failed warnings in gluster mount logs with disperse volume
  • +
  • #1414346: Quota: After upgrade from 3.7 to higher version , gluster quota list command shows "No quota configured on volume repvol"
  • +
  • #1414645: Typo in glusterfs code comments
  • +
  • #1414782: Add logs to selfheal code path to be helpful for debug
  • +
  • #1414902: packaging: python/python2(/python3) cleanup
  • +
  • #1415115: client process crashed due to write behind translator
  • +
  • #1415590: removing old tier commands under the rebalance commands
  • +
  • #1415761: [Remove-brick] Hardlink migration fails with "lookup failed (No such file or directory)" error messages in rebalance logs
  • +
  • #1416251: [SNAPSHOT] With all USS plugin enable .snaps directory is not visible in cifs mount as well as windows mount
  • +
  • #1416520: Missing FOPs in the io-stats xlator
  • +
  • #1416689: Fix spurious failure of ec-background-heal.t
  • +
  • #1416889: Simplify refcount API for free'ing function
  • +
  • #1417050: [Stress] : SHD Logs flooded with "Heal Failed" messages,filling up "/" quickly
  • +
  • #1417466: Prevent reverse heal from happening
  • +
  • #1417522: Automatic split brain resolution must check for all the bricks to be up to avoiding serving of inconsistent data(visible on x3 or more)
  • +
  • #1417540: Mark tests/bitrot/bug-1373520.t bad
  • +
  • #1417588: glusterd is setting replicate volume property over disperse volume or vice versa
  • +
  • #1417913: Hangs on 32 bit systems since 3.9.0
  • +
  • #1418014: disable client.io-threads on replica volume creation
  • +
  • #1418095: Portmap allocates way too much memory (256KB) on stack
  • +
  • #1418213: [Ganesha+SSL] : Bonnie++ hangs during rewrites.
  • +
  • #1418249: [RFE] Need to have group cli option to set all md-cache options using a single command
  • +
  • #1418259: Quota: After deleting directory from mount point on which quota was configured, quota list command output is blank
  • +
  • #1418417: packaging: remove glusterfs-ganesha subpackage
  • +
  • #1418629: glustershd process crashed on systemic setup
  • +
  • #1418900: [RFE] Include few more options in virt file
  • +
  • #1418973: removing warning related to enum, to let the build take place without errors for 3.10
  • +
  • #1420166: The rebal-throttle setting does not work as expected
  • +
  • #1420202: glusterd is crashed at the time of stop volume
  • +
  • #1420434: Trash feature improperly disabled
  • +
  • #1420571: Massive xlator_t leak in graph-switch code
  • +
  • #1420611: when server-quorum is enabled, volume get returns 0 value for server-quorum-ratio
  • +
  • #1420614: warning messages seen in glusterd logs while setting the volume option
  • +
  • #1420619: Entry heal messages in glustershd.log while no entries shown in heal info
  • +
  • #1420623: [RHV-RHGS]: Application VM paused after add brick operation and VM didn't comeup after power cycle.
  • +
  • #1420637: Modified volume options not synced once offline nodes comes up.
  • +
  • #1420697: CLI option "--timeout" is accepting non numeric and negative values.
  • +
  • #1420713: glusterd: storhaug, remove all vestiges ganesha
  • +
  • #1421023: Binary file gf_attach generated during build process should be git ignored
  • +
  • #1421590: Bricks take up new ports upon volume restart after add-brick op with brick mux enabled
  • +
  • #1421600: Test files clean up for tier during 3.10
  • +
  • #1421607: Getting error messages in glusterd.log when peer detach is done
  • +
  • #1421653: dht_setxattr returns EINVAL when a file is deleted during the FOP
  • +
  • #1421721: volume start command hangs
  • +
  • #1421724: glusterd log is flooded with stale disconnect rpc messages
  • +
  • #1421759: Gluster NFS server crashing in __mnt3svc_umountall
  • +
  • #1421937: [Replicate] "RPC call decoding failed" leading to IO hang & mount inaccessible
  • +
  • #1421938: systemic testing: seeing lot of ping time outs which would lead to splitbrains
  • +
  • #1421955: Disperse: Fallback to pre-compiled code execution when dynamic code generation fails
  • +
  • #1422074: GlusterFS truncates nanoseconds to microseconds when setting mtime
  • +
  • #1422152: Bricks not coming up when ran with address sanitizer
  • +
  • #1422624: Need to improve remove-brick failure message when the brick process is down.
  • +
  • #1422760: [Geo-rep] Recreating geo-rep session with same slave after deleting with reset-sync-time fails to sync
  • +
  • #1422776: multiple glusterfsd process crashed making the complete subvolume unavailable
  • +
  • #1423369: unnecessary logging in rda_opendir
  • +
  • #1423373: Crash in index xlator because of race in inode_ctx_set and inode_ref
  • +
  • #1423410: Mount of older client fails
  • +
  • #1423413: Self-heal fail an WORMed-Files
  • +
  • #1423448: glusterfs-fuse RPM now depends on gfapi
  • +
  • #1424764: Coverty scan return false positive regarding crypto
  • +
  • #1424791: Coverty scan detect a potential free on uninitialised pointer in error code path
  • +
  • #1424793: Missing verification of fcntl return code
  • +
  • #1424796: Remove deadcode found by coverty in glusterd-utils.c
  • +
  • #1424802: Missing call to va_end in xlators/cluster/dht/src/dht-common.c
  • +
  • #1424809: Fix another coverty error for useless goto
  • +
  • #1424815: Fix erronous comparaison of flags resulting in UUID always sent
  • +
  • #1424894: Some switches don't have breaks causing unintended fall throughs.
  • +
  • #1424905: Coverity: Memory issues and dead code
  • +
  • #1425288: glusterd is not validating for allowed values while setting "cluster.brick-multiplex" property
  • +
  • #1425515: tests: quota-anon-fd-nfs.t needs to check if nfs mount is avialable before mounting
  • +
  • #1425623: Free all xlator specific resources when xlator->fini() gets called
  • +
  • #1425676: gfids are not populated in release/releasedir requests
  • +
  • #1425703: [Disperse] Metadata version is not healing when a brick is down
  • +
  • #1425743: Tier ./tests/bugs/glusterd/bug-1303028-Rebalance-glusterd-rpc-connection-issue.t
  • +
  • #1426032: Log message shows error code as success even when rpc fails to connect
  • +
  • #1426052: ‘state’ set but not used error when readline and/or ncurses is not installed
  • +
  • #1426059: gluster fuse client losing connection to gluster volume frequently
  • +
  • #1426125: Add logs to identify whether disconnects are voluntary or due to network problems
  • +
  • #1426509: include volume name in rebalance stage error log
  • +
  • #1426667: [GSS] NFS Sub-directory mount not working on solaris10 client
  • +
  • #1426891: script to resolve function name and line number from backtrace
  • +
  • #1426948: [RFE] capture portmap details in glusterd's statedump
  • +
  • #1427012: Disconnects in nfs mount leads to IO hang and mount inaccessible
  • +
  • #1427018: [RFE] - Need a way to reduce the logging of messages "Peer CN" and "SSL verification suceeded messages" in glusterd.log file
  • +
  • #1427404: Move tests/bitrot/bug-1373520.t to bad tests and fix the underlying issue in posix
  • +
  • #1428036: Update rfc.sh to check/request issue # when a commit is an “rfc”
  • +
  • #1428047: Require a Jenkins job to validate Change-ID on commits to branches in glusterfs repository
  • +
  • #1428055: dht/rebalance: Increase maximum read block size from 128 KB to 1 MB
  • +
  • #1428058: tests: Fix tests/bugs/distribute/bug-1161311.t
  • +
  • #1428064: nfs: Check for null buf, and set op_errno to EIO not 0
  • +
  • #1428068: nfs: Tear down transports for requests that arrive before the volume is initialized
  • +
  • #1428073: nfs: Fix compiler warning when calling svc_getcaller
  • +
  • #1428093: protocol/server: Fix crash bug in unlink flow
  • +
  • #1428510: memory leak in features/locks xlator
  • +
  • #1429198: Restore atime/mtime for symlinks and other non-regular files.
  • +
  • #1429200: disallow increasing replica count for arbiter volumes
  • +
  • #1429330: [crawler]: auxiliary mount remains even after crawler finishes
  • +
  • #1429696: ldd libgfxdr.so.0.0.1: undefined symbol: __gf_free
  • +
  • #1430042: Transport endpoint not connected error seen on client when glusterd is restarted
  • +
  • #1430148: USS is broken when multiplexing is on
  • +
  • #1430608: [RFE] Pass slave volume in geo-rep as read-only
  • +
  • #1430719: gfid split brains not getting resolved with automatic splitbrain resolution
  • +
  • #1430841: build/packaging: Debian and Ubuntu don't have /usr/libexec/; results in bad packages
  • +
  • #1430860: brick process crashes when glusterd is restarted
  • +
  • #1431183: [RFE] Gluster get state command should provide connected client related information
  • +
  • #1431192: [RFE] Gluster get state command should provide volume and cluster utilization related information
  • +
  • #1431908: Enabling parallel-readdir causes dht linkto files to be visible on the mount,
  • +
  • #1431963: Warn CLI while creating replica 2 volumes
  • +
  • #1432542: Glusterd crashes when restarted with many volumes
  • +
  • #1433405: GF_REF_PUT() should return 0 when the structure becomes invalid
  • +
  • #1433425: Unrecognized filesystems (i.e. btrfs, zfs) log many errors about "getinode size"
  • +
  • #1433506: [Geo-rep] Master and slave mounts are not accessible to take client profile info
  • +
  • #1433571: Undo pending xattrs only on the up bricks
  • +
  • #1433578: glusterd crashes when peering an IP where the address is more than acceptable range (>255) OR with random hostnames
  • +
  • #1433815: auth failure after upgrade to GlusterFS 3.10
  • +
  • #1433838: Move spit-brain msg in read txn to debug
  • +
  • #1434018: [geo-rep]: Worker crashes with [Errno 16] Device or resource busy: '.gfid/00000000-0000-0000-0000-000000000001/dir.166 while renaming directories
  • +
  • #1434062: synclocks don't work correctly under contention
  • +
  • #1434274: BZ for some bugs found while going through synctask code
  • +
  • #1435943: When parallel readdir is enabled and there are simultaneous readdir and disconnects, then it results in crash
  • +
  • #1436086: Parallel readdir on Gluster NFS displays less number of dentries
  • +
  • #1436090: When parallel readdir is enabled, linked to file resolution fails
  • +
  • #1436739: Sharding: Fix a performance bug
  • +
  • #1436936: parameter state->size is wrong in server3_3_writev
  • +
  • #1437037: Standardize atomic increment/decrement calling conventions
  • +
  • #1437494: Brick Multiplexing:Volume status still shows the PID even after killing the process
  • +
  • #1437748: Spacing issue in fix-layout status output
  • +
  • #1437780: don't send lookup in fuse_getattr()
  • +
  • #1437853: Spellcheck issues reported during Debian build
  • +
  • #1438255: Don't wind post-op on a brick where the fop phase failed.
  • +
  • #1438370: rebalance: Allow admin to change thread count for rebalance
  • +
  • #1438411: [Ganesha + EC] : Input/Output Error while creating LOTS of smallfiles
  • +
  • #1438738: Inode ref leak on anonymous reads and writes
  • +
  • #1438772: build: clang/llvm has builtin_ffs() and builtin_popcount()
  • +
  • #1438810: File-level WORM allows ftruncate() on read-only files
  • +
  • #1438858: explicitly specify executor to be bash for tests
  • +
  • #1439527: [disperse] Don't count healing brick as healthy brick
  • +
  • #1439571: dht/rebalance: Improve rebalance crawl performance
  • +
  • #1439640: [Parallel Readdir] : No bound-checks/CLI validation for parallel readdir tunables
  • +
  • #1440051: Application VMs with their disk images on sharded-replica 3 volume are unable to boot after performing rebalance
  • +
  • #1441035: remove bug-1421590-brick-mux-reuse-ports.t
  • +
  • #1441106: [Geo-rep]: Unnecessary unlink call while processing rmdir
  • +
  • #1441491: The data-self-heal option is not honored in EC
  • +
  • #1441508: dht/cluster: rebalance/remove-brick should honor min-free-disk
  • +
  • #1441910: gluster volume stop hangs
  • +
  • #1441945: [Eventing]: Unrelated error message displayed when path specified during a 'webhook-test/add' is missing a schema
  • +
  • #1442145: split-brain-favorite-child-policy.t depends on "bc"
  • +
  • #1442411: meta xlator leaks memory when unloaded
  • +
  • #1442569: Implement Negative lookup cache feature to improve create performance
  • +
  • #1442724: rm -rf returns ENOTEMPTY even though ls on the mount point returns no files
  • +
  • #1442760: snapshot: snapshots appear to be failing with respect to secure geo-rep slave
  • +
  • #1443373: mkdir/rmdir loop causes gfid-mismatch on a 6 brick distribute volume
  • +
  • #1443896: [BrickMultiplex] gluster command not responding and .snaps directory is not visible after executing snapshot related command
  • +
  • #1443959: packaging: no firewalld-filesystem before el 7.3
  • +
  • #1443977: Unable to take snapshot on a geo-replicated volume, even after stopping the session
  • +
  • #1444023: io-stats xlator leaks memory when fini() is called
  • +
  • #1444228: Autoconf leaves unexpanded variables in path names of non-shell-script text files
  • +
  • #1444941: bogus date in %changelog
  • +
  • #1445569: Provide a correct way to save the statedump generated by gfapi application
  • +
  • #1445590: Incorrect and redundant logs in the DHT rmdir code path
  • +
  • #1446126: S30samba-start.sh throws 'unary operator expected' warning during independent execution
  • +
  • #1446273: Some functions are exported incorrectly for Mac OS X with the GFAPI_PUBLIC macro
  • +
  • #1447543: Revert experimental and 4.0 features to prepare for 3.11 release
  • +
  • #1447571: RFE: Enhance handleops readdirplus operation to return handles along with dirents
  • +
  • #1447597: RFE : SELinux translator to support setting SELinux contexts on files in a glusterfs volume
  • +
  • #1447604: volume set fails if nfs.so is not installed
  • +
  • #1447607: Don't allow rebalance/fix-layout operation on sharding enabled volumes till dht+sharding bugs are fixed
  • +
  • #1448345: Segmentation fault when creating a qcow2 with qemu-img
  • +
  • #1448416: Halo Replication feature for AFR translator
  • +
  • #1449004: [Brick Multiplexing] : Bricks for multiple volumes going down after glusterd restart and not coming back up after volume start force
  • +
  • #1449191: Multiple bricks WILL crash after TCP port probing
  • +
  • #1449311: [whql][virtio-block+glusterfs]"Disk Stress" and "Disk Verification" job always failed on win7-32/win2012/win2k8R2 guest
  • +
  • #1449775: quota: limit-usage command failed with error " Failed to start aux mount"
  • +
  • #1449921: afr: include quorum type and count when dumping afr priv
  • +
  • #1449924: When either killing or restarting a brick with performance.stat-prefetch on, stat sometimes returns a bad st_size value.
  • +
  • #1449933: Brick Multiplexing :- resetting a brick bring down other bricks with same PID
  • +
  • #1450267: nl-cache xlator leaks timer wheel and other memory
  • +
  • #1450377: GNFS crashed while taking lock on a file from 2 different clients having same volume mounted from 2 different servers
  • +
  • #1450565: glfsheal: crashed(segfault) with disperse volume in RDMA
  • +
  • #1450729: Brick Multiplexing: seeing Input/Output Error for .trashcan
  • +
  • #1450933: [New] - Replacing an arbiter brick while I/O happens causes vm pause
  • +
  • #1451033: contrib: timer-wheel 32-bit bug, use builtin_fls, license, etc
  • +
  • #1451573: AFR returns the node uuid of the same node for every file in the replica
  • +
  • #1451586: crash in dht_rmdir_do
  • +
  • #1451591: cli xml status of detach tier broken
  • +
  • #1451887: Add tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t to bad tests
  • +
  • #1452000: Spacing issue in fix-layout status output
  • +
  • #1453050: [DHt] : segfault in dht_selfheal_dir_setattr while running regressions
  • +
  • #1453086: Brick Multiplexing: On reboot of a node Brick multiplexing feature lost on that node as multiple brick processes get spawned
  • +
  • #1453152: [Parallel Readdir] : Mounts fail when performance.parallel-readdir is set to "off"
  • +
  • #1454533: lock_revocation.t Marked as bad in 3.11 for CentOS as well
  • +
  • #1454569: [geo-rep + nl]: Multiple crashes observed on slave with "nlc_lookup_cbk"
  • +
  • #1454597: [Tiering]: High and low watermark values when set to the same level, is allowed
  • +
  • #1454612: glusterd on a node crashed after running volume profile command
  • +
  • #1454686: Implement FALLOCATE FOP for EC
  • +
  • #1454853: Seeing error "Failed to get the total number of files. Unable to estimate time to complete rebalance" in rebalance logs
  • +
  • #1455177: ignore incorrect uuid validation in gd_validate_mgmt_hndsk_req
  • +
  • #1455423: dht: dht self heal fails with no hashed subvol error
  • +
  • #1455907: heal info shows the status of the bricks as "Transport endpoint is not connected" though bricks are up
  • +
  • #1456224: [gluster-block]:Need a volume group profile option for gluster-block volume to add necessary options to be added.
  • +
  • #1456225: gluster-block is not working as expected when shard is enabled
  • +
  • #1456331: [Bitrot]: Brick process crash observed while trying to recover a bad file in disperse volume
  • +
diff --git a/release-notes/3.11.1/index.html b/release-notes/3.11.1/index.html

Release notes for Gluster 3.11.1

+

This is a bugfix release. The release notes for 3.11.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.11 stable release.

+

Major changes, features and limitations addressed in this release

+

Improved disperse performance

+

The fix for bug #1456259 changes the way messages are read and processed from the socket layers on the Gluster client. This has shown performance improvements on disperse volumes, and is applicable to other volume types as well, where there may be multiple applications or users accessing the same mount point.

+

Group settings for enabling negative lookup caching provided

+

The ability to serve negative lookups from cache was added in 3.11.0; with this release, a group volume set option is added to make enabling this feature easier.

+

See group-nl-cache for more details.
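As a quick illustration (a sketch; <volname> is a placeholder and the group profile name is assumed from the feature name above), the whole set of negative-lookup-cache options can be enabled in one step:

# gluster volume set <volname> group nl-cache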

+

Gluster FUSE now implements the "-o auto_unmount" feature

+

libfuse has an auto_unmount option which, if enabled, ensures that the file +system is unmounted at FUSE server termination by running a separate monitor +process that performs the unmount when that occurs. This release implements that +option and behavior for glusterfs.

+

Note that "auto unmount" (robust or not) is a leaky abstraction, as the kernel +cannot guarantee that at the path where the FUSE fs is mounted is actually the +toplevel mount at the time of the umount(2) call, for multiple reasons, +among others, see:

+ +
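A hedged example of requesting this behavior at mount time (host, volume, and mount point are placeholders; the option name follows libfuse and is assumed to be passed through by the mount helper):

# mount -t glusterfs -o auto_unmount <hostname>:/<volname> /<mount_point>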

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
     • Status of this bug can be tracked here, #1465123
+

Bugs addressed

+

Bugs addressed since release-3.11.0 are listed below.

+
    +
  • #1456259: limited throughput with disperse volume over small number of bricks
  • +
  • #1457058: glusterfs client crash on io-cache.so(__ioc_page_wakeup+0x44)
  • +
  • #1457289: tierd listens to a port.
  • +
  • #1457339: DHT: slow readdirp performance
  • +
  • #1457616: "split-brain observed [Input/output error]" error messages in samba logs during parallel rm -rf
  • +
  • #1457901: nlc_lookup_cbk floods logs
  • +
  • #1458570: [brick multiplexing] detach a brick if posix health check thread complaints about underlying brick
  • +
  • #1458664: [Geo-rep]: METADATA errors are seen even though everything is in sync
  • +
  • #1459090: all: spelling errors (debian package maintainer)
  • +
  • #1459095: extras/hook-scripts: non-portable shell syntax (debian package maintainer)
  • +
  • #1459392: possible repeatedly recursive healing of same file with background heal not happening when IO is going on
  • +
  • #1459759: Glusterd segmentation fault in ' _Unwind_Backtrace' while running peer probe
  • +
  • #1460647: posix-acl: Whitelist virtual ACL xattrs
  • +
  • #1460894: Rebalance estimate time sometimes shows negative values
  • +
  • #1460895: Upcall missing invalidations
  • +
  • #1460896: [Negative Lookup Cache]Need a single group set command for enabling all required nl cache options
  • +
  • #1460898: Enabling parallel-readdir causes dht linkto files to be visible on the mount,
  • +
  • #1462121: [GNFS+EC] Unable to release the lock when the other client tries to acquire the lock on the same file
  • +
  • #1462127: [Bitrot]: Inconsistency seen with 'scrub ondemand' - fails to trigger scrub
  • +
  • #1462636: Use of force with volume start, creates brick directory even it is not present
  • +
  • #1462661: lk fop succeeds even when lock is not acquired on at least quorum number of bricks
  • +
  • #1463250: with AFR now making both nodes to return UUID for a file will result in georep consuming more resources
  • +
diff --git a/release-notes/3.11.2/index.html b/release-notes/3.11.2/index.html
new file mode 100644
index 00000000..a59cec6c

Release notes for Gluster 3.11.2

+

This is a bugfix release. The release notes for 3.11.1 and 3.11.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.11 stable release.

+

Major changes, features and limitations addressed in this release

+

There are no major features or changes made in this release.

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
     • The last known cause for corruption (Bug #1465123) has a fix with this release. As further testing is still in progress, the issue is retained as a major issue.
     • Status of this bug can be tracked here, #1465123
+

Bugs addressed

+

Bugs addressed since release-3.11.1 are listed below.

+
    +
  • #1463512: USS: stale snap entries are seen when activation/deactivation performed during one of the glusterd's unavailability
  • +
  • #1463513: [geo-rep]: extended attributes are not synced if the entry and extended attributes are done within changelog roleover/or entry sync
  • +
  • #1463517: Brick Multiplexing:dmesg shows request_sock_TCP: Possible SYN flooding on port 49152 and memory related backtraces
  • +
  • #1463528: [Perf] 35% drop in small file creates on smbv3 on *2
  • +
  • #1463626: [Ganesha]Bricks got crashed while running posix compliance test suit on V4 mount
  • +
  • #1464316: DHT: Pass errno as an argument to gf_msg
  • +
  • #1465123: Fd based fops fail with EBADF on file migration
  • +
  • #1465854: Regression: Heal info takes longer time when a brick is down
  • +
  • #1466801: assorted typos and spelling mistakes from Debian lintian
  • +
  • #1466859: dht_rename_lock_cbk crashes in upstream regression test
  • +
  • #1467268: Heal info shows incorrect status
  • +
  • #1468118: disperse seek does not correctly handle the end of file
  • +
  • #1468200: [Geo-rep]: entry failed to sync to slave with ENOENT errror
  • +
  • #1468457: selfheal deamon cpu consumption not reducing when IOs are going on and all redundant bricks are brought down one after another
  • +
  • #1469459: Rebalance hangs on remove-brick if the target volume changes
  • +
  • #1470938: Regression: non-disruptive(in-service) upgrade on EC volume fails
  • +
  • #1471025: glusterfs process leaking memory when error occurs
  • +
  • #1471611: metadata heal not happening despite having an active sink
  • +
  • #1471869: cthon04 can cause segfault in gNFS/NLM
  • +
  • #1472794: Test script failing with brick multiplexing enabled
  • +
diff --git a/release-notes/3.11.3/index.html b/release-notes/3.11.3/index.html
new file mode 100644
index 00000000..c29f15a1

Release notes for Gluster 3.11.3

+

This is a bugfix release. The release notes for 3.11.2, 3.11.1, and 3.11.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.11 stable release.

+

This is possibly the last bugfix release for 3.11, as 3.12 is expected to be released around the end of August 2017, which will hence EOL the 3.11 release, as it is a short-term maintenance release (see release status).

+

Major changes, features and limitations addressed in this release

+

There are no major features or changes made in this release.

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
     • The last known cause for corruption (Bug #1465123) has a fix with the 3.11.2 release. As further testing is still in progress, the issue is retained as a major issue.
     • Status of this bug can be tracked here, #1465123
+

Bugs addressed

+

Bugs addressed since release-3.11.2 are listed below.

+
  • #1475637: [Scale] : Client logs flooded with "inode context is NULL" error messages
  • #1476822: scripts: invalid test in S32gluster_enable_shared_storage.sh
  • #1476870: [EC]: md5sum mismatches every time for a file from the fuse client on EC volume
  • #1476873: packaging: /var/lib/glusterd/options should be %config(noreplace)
  • #1479656: Permission denied errors when appending files after readdir
  • #1479692: Running sysbench on vm disk from plain distribute gluster volume causes disk corruption
diff --git a/release-notes/3.12.0/index.html b/release-notes/3.12.0/index.html
new file mode 100644
index 00000000..d202aff7

Release notes for Gluster 3.12.0

+

This is a major Gluster release that includes the ability to mount sub-directories using the Gluster native protocol (FUSE), further brick multiplexing enhancements that help scale to larger brick counts per node, enhancements to the gluster get-state CLI enabling better understanding of the participation/roles of various bricks and nodes in the cluster, the ability to resolve GFID split-brain using the existing CLI, easier GFID to real path mapping thus enabling easier diagnostics and correction for reported GFID issues (healing among other uses where GFID is the only available source for identifying a file), and other changes and fixes.

+

The most notable features and changes are documented on this page. A full list +of bugs that have been addressed is included further below.

+

Further, as the 3.11 release is a short-term maintenance release, features included in that release are available with 3.12 as well, and could be of interest to users upgrading to 3.12 from releases older than 3.11. The 3.11 release notes capture the list of features that were introduced with 3.11.

+

Major changes and features

+

Ability to mount sub-directories using the Gluster FUSE protocol

+

Notes for users:

+

With this release, it is possible to define sub-directories to be mounted by specific clients, providing additional granularity by letting clients mount and access only that portion of the volume.

+

Previously, Gluster FUSE mounts exposed the entire volume on the client. This feature helps share a volume among multiple consumers while restricting each of them to the sub-directory of choice.

+

Option controlling sub-directory allow/deny rules can be set as follows:

+
# gluster volume set <volname> auth.allow "/subdir1(192.168.1.*),/(192.168.10.*),/subdir2(192.168.8.*)"
+
+

How to mount from the client:

+
# mount -t glusterfs <hostname>:/<volname>/<subdir> /<mount_point>
+
+

Or,

+
# mount -t glusterfs <hostname>:/<volname> -osubdir_mount=<subdir> /<mount_point>
+
+

Limitations:

+
  • There is no throttling or QoS support for this feature. The feature only provides namespace isolation for the different clients.
+

Known Issues:

+
  • Once the number of sub-directories in the 'auth.allow' option crosses into the thousands, the performance of reconnect/authentication would be impacted.
+

GFID to path conversion is enabled by default

+

Notes for users:

+

Prior to this feature, the on-disk data had pointers back from a GFID to the respective filename only when quota was enabled. As a result, if there was a need to locate the path for a given GFID, quota had to be enabled.

+

The change brought in by this feature is to have this on-disk data present in all cases, not just when quota is enabled. Further, enhancements have been made to improve the manner of storing this information on disk as extended attributes.

+

The internal on-disk xattr that is now stored to reference the filename and parent for a GFID is trusted.gfid2path.<xxhash>

+

This feature is enabled by default with this release.
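As a hedged illustration (the brick and file paths below are hypothetical), the new xattr can be inspected directly on a brick with getfattr:

# getfattr -d -m trusted.gfid2path -e text /bricks/brick1/dir1/file1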

+

Limitations:

+

None

+

Known Issues:

+

None

+

Various enhancements have been made to the output of get-state CLI command

+

Notes for users:

+

The command #gluster get-state has been enhanced to output more information +as below,

+
  • Arbiter bricks are marked more clearly in a volume that has the feature enabled
  • Ability to get all volume options (both set and defaults) in the get-state output
  • Rebalance time estimates, for ongoing rebalance, are captured in the get-state output
  • If geo-replication is configured, get-state now captures the session details of the same
+
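For example, a minimal invocation (the output directory and file name are placeholders) that writes the state dump to a chosen location:

# gluster get-state glusterd odir /var/run/gluster/ file glusterd_state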

Limitations:

+

None

+

Known Issues:

+

None

+

Provided an option to set a limit on the number of bricks multiplexed in a process

+

Notes for users:

+

This release includes a global option to be switched on only if brick +multiplexing is enabled for the cluster. The introduction of this option allows +the user to control the number of bricks that are multiplexed in a process on a +node. If the limit set by this option is insufficient for a single process, +more processes are spawned for the subsequent bricks.

+

Usage:

+
#gluster volume set all cluster.max-bricks-per-process <value>
+
+
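For instance, to cap each brick process at 10 bricks (the value is only illustrative; choose one that suits the node's resources):

# gluster volume set all cluster.max-bricks-per-process 10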

Provided an option to use localtime timestamps in log entries

+

Limitations:

+

Gluster defaults to UTC timestamps. glusterd, glusterfsd, and server-side +glusterfs daemons will use UTC until one of,

+
  1. a command line option is processed,
  2. the gluster config (/var/lib/glusterd/options) is loaded,
  3. the admin manually sets localtime-logging (cluster.localtime-logging, e.g. # gluster volume set all cluster.localtime-logging enable).
+

There is no mount option to make the FUSE client enable localtime logging.

+

There is no option in gfapi to enable localtime logging.

+

Enhanced the option to export statfs data for bricks sharing the same backend filesystem

+

Notes for users: In the past, the 'storage/posix' xlator had an option called export-statfs-size, which, when set to 'no', exports zero as the value for a few fields in struct statvfs. From a user perspective, these are typically reflected in the output of the df command.

+

When backend bricks are shared between multiple brick processes, the values of these fields are now corrected to reflect field_value / number-of-bricks-at-node, thus enabling better usage reporting and also enhancing the ability for file placement in the distribute translator when used with the option min-free-disk.
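For illustration (the numbers are hypothetical): if a node hosts two bricks that share one backend filesystem with 100GiB free, each brick now reports roughly 100GiB / 2 = 50GiB, so df on the client and the min-free-disk check see a value closer to what is actually available to each brick.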

+

Provided a means to resolve GFID split-brain using the gluster CLI

+

Notes for users:

+

The existing CLI commands to heal files under split-brain did not handle cases where there was a GFID mismatch between the files. With this enhancement, the same CLI commands can now address GFID split-brain situations based on the choices provided.

+

The CLI options that are enhanced to help with this situation are,

+
volume heal <VOLNAME> split-brain {bigger-file <FILE> |
    latest-mtime <FILE> |
    source-brick <HOSTNAME:BRICKNAME> [<FILE>]}
+
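For example (volume and file names are placeholders), choosing the copy with the latest modification time as the heal source:

# gluster volume heal <VOLNAME> split-brain latest-mtime <FILE>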

Limitations:

+

None

+

Known Issues:

+

None

+ +

Notes for developers:

+

NOTE: Also relevant for users building from sources and needing different +defaults for some options

+

Most people consume Gluster in one of two ways:

+
  • From packages provided by their OS/distribution vendor
  • By building themselves from source
+

For the first group it doesn't matter whether configuration is done in a +configure script, via command-line options to that configure script, or in a +header file. All of these end up as edits to some file under the packager's +control, which is then run through their tools and process (e.g. rpmbuild) to +create the packages that users will install.

+

For the second group, convenience matters. Such users might not even have a +script wrapped around the configure process, and editing one line in a header +file is a lot easier than editing several in the configure script. This also +prevents a messy profusion of configure options, dozens of which might need to +be added to support a single such user's preferences. This comes back around as +greater simplicity for packagers as well. This patch defines site.h as the +header file for options and parameters that someone building the code for +themselves might want to tweak.

+

The project ships one version to reflect the developers' guess at the best +defaults for most users, and sophisticated users with unusual needs can +override many options at once just by maintaining their own version of that +file. Further guidelines for how to determine whether an option should go in +configure.ac or site.h are explained within site.h itself.

+ +

Notes for developers:

+

Function gf_xxh64_wrapper has been added as a wrapper into libglusterfs for +consumption by interested developers.

+

Reference to code can be found here

+ +

Notes for users:

+

glfs_ipc API was maintained as a public API in the GFAPI libraries. This has +been removed as a public interface, from this release onwards.

+

Any application, written directly to consume gfapi as a means of interfacing +with Gluster, using the mentioned API, would need to be modified to adapt to +this change.

+

NOTE: As of this release there are no known public consumers of this +API

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
     • The last known cause for corruption (Bug #1465123) has a fix with this release. As further testing is still in progress, the issue is retained as a major issue.
     • Status of this bug can be tracked here, #1465123
+

Bugs addressed

+

Bugs addressed since release-3.11.0 are listed below.

+
    +
  • #1047975: glusterfs/extras: add a convenience script to label (selinux) gluster bricks
  • +
  • #1254002: [RFE] Have named pthreads for easier debugging
  • +
  • #1318100: RFE : SELinux translator to support setting SELinux contexts on files in a glusterfs volume
  • +
  • #1318895: Heal info shows incorrect status
  • +
  • #1326219: Make Gluster/NFS an optional component
  • +
  • #1356453: DHT: slow readdirp performance
  • +
  • #1366817: AFR returns the node uuid of the same node for every file in the replica
  • +
  • #1381970: GlusterFS Daemon stops working after a longer runtime and higher file workload due to design flaws?
  • +
  • #1400924: [RFE] Rsync flags for performance improvements
  • +
  • #1402406: Client stale file handle error in dht-linkfile.c under SPEC SFS 2014 VDA workload
  • +
  • #1414242: [whql][virtio-block+glusterfs]"Disk Stress" and "Disk Verification" job always failed on win7-32/win2012/win2k8R2 guest
  • +
  • #1421938: systemic testing: seeing lot of ping time outs which would lead to splitbrains
  • +
  • #1424817: Fix wrong operators, found by coverty
  • +
  • #1428061: Halo Replication feature for AFR translator
  • +
  • #1428673: possible repeatedly recursive healing of same file with background heal not happening when IO is going on
  • +
  • #1430608: [RFE] Pass slave volume in geo-rep as read-only
  • +
  • #1431908: Enabling parallel-readdir causes dht linkto files to be visible on the mount,
  • +
  • #1433906: quota: limit-usage command failed with error " Failed to start aux mount"
  • +
  • #1437748: Spacing issue in fix-layout status output
  • +
  • #1438966: Multiple bricks WILL crash after TCP port probing
  • +
  • #1439068: Segmentation fault when creating a qcow2 with qemu-img
  • +
  • #1442569: Implement Negative lookup cache feature to improve create performance
  • +
  • #1442788: Cleanup timer wheel in glfs_fini()
  • +
  • #1442950: RFE: Enhance handleops readdirplus operation to return handles along with dirents
  • +
  • #1444596: [Brick Multiplexing] : Bricks for multiple volumes going down after glusterd restart and not coming back up after volume start force
  • +
  • #1445609: [perf-xlators/write-behind] write-behind-window-size could be set greater than its allowed MAX value 1073741824
  • +
  • #1446172: Brick Multiplexing :- resetting a brick bring down other bricks with same PID
  • +
  • #1446362: cli xml status of detach tier broken
  • +
  • #1446412: error-gen don't need to convert error string to int in every fop
  • +
  • #1446516: [Parallel Readdir] : Mounts fail when performance.parallel-readdir is set to "off"
  • +
  • #1447116: gfapi exports non-existing glfs_upcall_inode_get_event symbol
  • +
  • #1447266: [snapshot cifs]ls on .snaps directory is throwing input/output error over cifs mount
  • +
  • #1447389: Brick Multiplexing: seeing Input/Output Error for .trashcan
  • +
  • #1447609: server: fd should be refed before put into fdtable
  • +
  • #1447630: Don't allow rebalance/fix-layout operation on sharding enabled volumes till dht+sharding bugs are fixed
  • +
  • #1447826: potential endless loop in function glusterfs_graph_validate_options
  • +
  • #1447828: Should use dict_set_uint64 to set fd->pid when dump fd's info to dict
  • +
  • #1447953: Remove inadvertently merged IPv6 code
  • +
  • #1447960: [Tiering]: High and low watermark values when set to the same level, is allowed
  • +
  • #1447966: 'make cscope' fails on a clean tree due to missing generated XDR files
  • +
  • #1448150: USS: stale snap entries are seen when activation/deactivation performed during one of the glusterd's unavailability
  • +
  • #1448265: use common function iov_length to instead of duplicate code
  • +
  • #1448293: Implement FALLOCATE FOP for EC
  • +
  • #1448299: Mismatch in checksum of the image file after copying to a new image file
  • +
  • #1448364: limited throughput with disperse volume over small number of bricks
  • +
  • #1448640: Seeing error "Failed to get the total number of files. Unable to estimate time to complete rebalance" in rebalance logs
  • +
  • #1448692: use GF_ATOMIC to generate callid
  • +
  • #1448804: afr: include quorum type and count when dumping afr priv
  • +
  • #1448914: [geo-rep]: extended attributes are not synced if the entry and extended attributes are done within changelog roleover/or entry sync
  • +
  • #1449008: remove useless options from glusterd's volume set table
  • +
  • #1449232: race condition between client_ctx_get and client_ctx_set
  • +
  • #1449329: When either killing or restarting a brick with performance.stat-prefetch on, stat sometimes returns a bad st_size value.
  • +
  • #1449348: disperse seek does not correctly handle the end of file
  • +
  • #1449495: glfsheal: crashed(segfault) with disperse volume in RDMA
  • +
  • #1449610: [New] - Replacing an arbiter brick while I/O happens causes vm pause
  • +
  • #1450010: [gluster-block]:Need a volume group profile option for gluster-block volume to add necessary options to be added.
  • +
  • #1450559: Error 0-socket.management: socket_poller XX.XX.XX.XX:YYY failed (Input/output error) during any volume operation
  • +
  • #1450630: [brick multiplexing] detach a brick if posix health check thread complaints about underlying brick
  • +
  • #1450730: Add tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t to bad tests
  • +
  • #1450975: Fix on demand file migration from client
  • +
  • #1451083: crash in dht_rmdir_do
  • +
  • #1451162: dht: Make throttle option "normal" value uniform across dht_init and dht_reconfigure
  • +
  • #1451248: Brick Multiplexing: On reboot of a node Brick multiplexing feature lost on that node as multiple brick processes get spawned
  • +
  • #1451588: [geo-rep + nl]: Multiple crashes observed on slave with "nlc_lookup_cbk"
  • +
  • #1451724: glusterfind pre crashes with "UnicodeDecodeError: 'utf8' codec can't decode" error when the --no-encode is used
  • +
  • #1452006: tierd listens to a port.
  • +
  • #1452084: [Ganesha] : Stale linkto files after unsuccessfuly hardlinks
  • +
  • #1452102: [DHt] : segfault in dht_selfheal_dir_setattr while running regressions
  • +
  • #1452378: Cleanup unnecessary logs in fix_quorum_options
  • +
  • #1452527: Shared volume doesn't get mounted on few nodes after rebooting all nodes in cluster.
  • +
  • #1452956: glusterd on a node crashed after running volume profile command
  • +
  • #1453151: [RFE] glusterfind: add --end-time and --field-separator options
  • +
  • #1453977: Brick Multiplexing: Deleting brick directories of the base volume must gracefully detach from glusterfsd without impacting other volumes IO(currently seeing transport end point error)
  • +
  • #1454317: [Bitrot]: Brick process crash observed while trying to recover a bad file in disperse volume
  • +
  • #1454375: ignore incorrect uuid validation in gd_validate_mgmt_hndsk_req
  • +
  • #1454418: Glusterd segmentation fault in ' _Unwind_Backtrace' while running peer probe
  • +
  • #1454701: DHT: Pass errno as an argument to gf_msg
  • +
  • #1454865: [Brick Multiplexing] heal info shows the status of the bricks as "Transport endpoint is not connected" though bricks are up
  • +
  • #1454872: [Geo-rep]: Make changelog batch size configurable
  • +
  • #1455049: [GNFS+EC] Unable to release the lock when the other client tries to acquire the lock on the same file
  • +
  • #1455104: dht: dht self heal fails with no hashed subvol error
  • +
  • #1455179: [Geo-rep]: Log time taken to sync entry ops, metadata ops and data ops for each batch
  • +
  • #1455301: gluster-block is not working as expected when shard is enabled
  • +
  • #1455559: [Geo-rep]: METADATA errors are seen even though everything is in sync
  • +
  • #1455831: libglusterfs: updates old comment for 'arena_size'
  • +
  • #1456361: DHT : for many operation directory/file path is '(null)' in brick log
  • +
  • #1456385: glusterfs client crash on io-cache.so(__ioc_page_wakeup+0x44)
  • +
  • #1456405: Brick Multiplexing:dmesg shows request_sock_TCP: Possible SYN flooding on port 49152 and memory related backtraces
  • +
  • #1456582: "split-brain observed [Input/output error]" error messages in samba logs during parallel rm -rf
  • +
  • #1456653: nlc_lookup_cbk floods logs
  • +
  • #1456898: Regression test for add-brick failing with brick multiplexing enabled
  • +
  • #1457202: Use of force with volume start, creates brick directory even it is not present
  • +
  • #1457808: all: spelling errors (debian package maintainer)
  • +
  • #1457812: extras/hook-scripts: non-portable shell syntax (debian package maintainer)
  • +
  • #1457981: client fails to connect to the brick due to an incorrect port reported back by glusterd
  • +
  • #1457985: Rebalance estimate time sometimes shows negative values
  • +
  • #1458127: Upcall missing invalidations
  • +
  • #1458193: Implement seek() fop in trace translator
  • +
  • #1458197: io-stats usability/performance statistics enhancements
  • +
  • #1458539: [Negative Lookup]: negative lookup features doesn't seem to work on restart of volume
  • +
  • #1458582: add all as volume option in gluster volume get usage
  • +
  • #1458768: [Perf] 35% drop in small file creates on smbv3 on *2
  • +
  • #1459402: brick process crashes while running bug-1432542-mpx-restart-crash.t in a loop
  • +
  • #1459530: [RFE] Need a way to resolve gfid split brains
  • +
  • #1459620: [geo-rep]: Worker crashed with TypeError: expected string or buffer
  • +
  • #1459781: Brick Multiplexing:Even clean Deleting of the brick directories of base volume is resulting in posix health check errors(just as we see in ungraceful delete methods)
  • +
  • #1459971: posix-acl: Whitelist virtual ACL xattrs
  • +
  • #1460225: Not cleaning up stale socket file is resulting in spamming glusterd logs with warnings of "got disconnect from stale rpc"
  • +
  • #1460514: [Ganesha] : Ganesha crashes while cluster enters failover/failback mode
  • +
  • #1460585: Revert CLI restrictions on running rebalance in VM store use case
  • +
  • #1460638: ec-data-heal.t fails with brick mux enabled
  • +
  • #1460659: Avoid one extra call of l(get|list)xattr system call after use buffer in posix_getxattr
  • +
  • #1461129: malformed cluster.server-quorum-ratio setting can lead to split brain
  • +
  • #1461648: Update GlusterFS README
  • +
  • #1461655: glusterd crashes when statedump is taken
  • +
  • #1461792: lk fop succeeds even when lock is not acquired on at least quorum number of bricks
  • +
  • #1461845: [Bitrot]: Inconsistency seen with 'scrub ondemand' - fails to trigger scrub
  • +
  • #1462200: glusterd status showing failed when it's stopped in RHEL7
  • +
  • #1462241: glusterfind: syntax error due to uninitialized variable 'end'
  • +
  • #1462790: with AFR now making both nodes to return UUID for a file will result in georep consuming more resources
  • +
  • #1463178: [Ganesha]Bricks got crashed while running posix compliance test suit on V4 mount
  • +
  • #1463365: Changes for Maintainers 2.0
  • +
  • #1463648: Use GF_XATTR_LIST_NODE_UUIDS_KEY to figure out local subvols
  • +
  • #1464072: cns-brick-multiplexing: brick process fails to restart after gluster pod failure
  • +
  • #1464091: Regression: Heal info takes longer time when a brick is down
  • +
  • #1464110: [Scale] : Rebalance ETA (towards the end) may be inaccurate,even on a moderately large data set.
  • +
  • #1464327: glusterfs client crashes when reading large directory
  • +
  • #1464359: selfheal deamon cpu consumption not reducing when IOs are going on and all redundant bricks are brought down one after another
  • +
  • #1465024: glusterfind: DELETE path needs to be unquoted before further processing
  • +
  • #1465075: Fd based fops fail with EBADF on file migration
  • +
  • #1465214: build failed with GF_DISABLE_MEMPOOL
  • +
  • #1465559: multiple brick processes seen on gluster(fs)d restart in brick multiplexing
  • +
  • #1466037: Fuse mount crashed with continuous dd on a file and reading the file in parallel
  • +
  • #1466110: dht_rename_lock_cbk crashes in upstream regression test
  • +
  • #1466188: Add scripts to analyze quota xattr in backend and identify accounting issues
  • +
  • #1466785: assorted typos and spelling mistakes from Debian lintian
  • +
  • #1467209: [Scale] : Rebalance ETA shows the initial estimate to be ~140 days,finishes within 18 hours though.
  • +
  • #1467277: [GSS] [RFE] add documentation on --xml and --mode=script options to gluster interactive help and man pages
  • +
  • #1467313: cthon04 can cause segfault in gNFS/NLM
  • +
  • #1467513: CIFS:[USS]: .snaps is not accessible from the CIFS client after volume stop/start
  • +
  • #1467718: [Geo-rep]: entry failed to sync to slave with ENOENT errror
  • +
  • #1467841: gluster volume status --xml fails when there are 100 volumes
  • +
  • #1467986: possible memory leak in glusterfsd with multiplexing
  • +
  • #1468191: Enable stat-prefetch in group virt
  • +
  • #1468261: Regression: non-disruptive(in-service) upgrade on EC volume fails
  • +
  • #1468279: metadata heal not happening despite having an active sink
  • +
  • #1468291: NFS Sub directory is getting mounted on solaris 10 even when the permission is restricted in nfs.export-dir volume option
  • +
  • #1468432: tests: fix stats-dump.t failure
  • +
  • #1468433: rpc: include current second in timed out frame cleanup on client
  • +
  • #1468863: Assert in mem_pools_fini during libgfapi-fini-hang.t on NetBSD
  • +
  • #1469029: Rebalance hangs on remove-brick if the target volume changes
  • +
  • #1469179: invoke checkpatch.pl with strict
  • +
  • #1469964: cluster/dht: Fix hardlink migration failures
  • +
  • #1470170: mem-pool: mem_pool_fini() doesn't release entire memory allocated
  • +
  • #1470220: glusterfs process leaking memory when error occurs
  • +
  • #1470489: bulk removexattr shouldn't allow removal of trusted.gfid/trusted.glusterfs.volume-id
  • +
  • #1470533: Brick Mux Setup: brick processes(glusterfsd) crash after a restart of volume which was preceded with some actions
  • +
  • #1470768: file /usr/lib64/glusterfs/3.12dev/xlator is not owned by any package
  • +
  • #1471790: [Brick Multiplexing] : cluster.brick-multiplex has no description.
  • +
  • #1472094: Test script failing with brick multiplexing enabled
  • +
  • #1472250: Remove fop_enum_to_string, get_fop_int usage in libglusterfs
  • +
  • #1472417: No clear method to multiplex all bricks to one process(glusterfsd) with cluster.max-bricks-per-process option
  • +
  • #1472949: [distribute] crashes seen upon rmdirs
  • +
  • #1475181: dht remove-brick status does not indicate failures files not migrated because of a lack of space
  • +
  • #1475192: [Scale] : Rebalance ETA shows the initial estimate to be ~140 days,finishes within 18 hours though.
  • +
  • #1475258: [Geo-rep]: Geo-rep hangs in changelog mode
  • +
  • #1475399: Rebalance estimate time sometimes shows negative values
  • +
  • #1475635: [Scale] : Client logs flooded with "inode context is NULL" error messages
  • +
  • #1475641: gluster core dump due to assert failed GF_ASSERT (brick_index < wordcount);
  • +
  • #1475662: [Scale] : Rebalance Logs are bulky.
  • +
  • #1476109: Brick Multiplexing: Brick process crashed at changetimerecorder(ctr) translator when restarting volumes
  • +
  • #1476208: [geo-rep]: few of the self healed hardlinks on master did not sync to slave
  • +
  • #1476653: cassandra fails on gluster-block with both replicate and ec volumes
  • +
  • #1476654: gluster-block default shard-size should be 64MB
  • +
  • #1476819: scripts: invalid test in S32gluster_enable_shared_storage.sh
  • +
  • #1476863: packaging: /var/lib/glusterd/options should be %config(noreplace)
  • +
  • #1476868: [EC]: md5sum mismatches every time for a file from the fuse client on EC volume
  • +
  • #1477152: [Remove-brick] Few files are getting migrated eventhough the bricks crossed cluster.min-free-disk value
  • +
  • #1477190: [GNFS] GNFS got crashed while mounting volume on solaris client
  • +
  • #1477381: Revert experimental and 4.0 features to prepare for 3.12 release
  • +
  • #1477405: eager-lock should be off for cassandra to work at the moment
  • +
  • #1477994: [Ganesha] : Ganesha crashes while cluster enters failover/failback mode
  • +
  • #1478276: separating attach tier and add brick
  • +
  • #1479118: AFR entry self heal removes a directory's .glusterfs symlink.
  • +
  • #1479263: nfs process crashed in "nfs3svc_getattr"
  • +
  • #1479303: [Perf] : Large file sequential reads are off target by ~38% on FUSE/Ganesha
  • +
  • #1479474: Add NULL gfid checks before creating file
  • +
  • #1479655: Permission denied errors when appending files after readdir
  • +
  • #1479662: when gluster pod is restarted, bricks from the restarted pod fails to connect to fuse, self-heal etc
  • +
  • #1479717: Running sysbench on vm disk from plain distribute gluster volume causes disk corruption
  • +
  • #1480448: More useful error - replace 'not optimal'
  • +
  • #1480459: Gluster puts PID files in wrong place
  • +
  • #1481931: [Scale] : I/O errors on multiple gNFS mounts with "Stale file handle" during rebalance of an erasure coded volume.
  • +
  • #1482804: Negative Test: glusterd crashes for some of the volume options if set at cluster level
  • +
  • #1482835: glusterd fails to start
  • +
  • #1483402: DHT: readdirp fails to read some directories.
  • +
  • #1483996: packaging: use rdma-core(-devel) instead of ibverbs, rdmacm; disable rdma on armv7hl
  • +
  • #1484440: packaging: /run and /var/run; prefer /run
  • +
  • #1484885: [rpc]: EPOLLERR - disconnecting now messages every 3 secs after completing rebalance
  • +
  • #1486107: /var/lib/glusterd/peers File had a blank line, Stopped Glusterd from starting
  • +
  • #1486110: [quorum]: Replace brick is happened when Quorum not met.
  • +
  • #1486120: symlinks trigger faulty geo-replication state (rsnapshot usecase)
  • +
  • #1486122: gluster-block profile needs to have strict-o-direct
  • +
diff --git a/release-notes/3.12.1/index.html b/release-notes/3.12.1/index.html
new file mode 100644
index 00000000..bdc19b0a

Release notes for Gluster 3.12.1

+

This is a bugfix release. The release notes for 3.12.0 and 3.12.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+
 No Major changes
+
+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
     • The last known cause for corruption (Bug #1465123) has a fix with this release. As further testing is still in progress, the issue is retained as a major issue.
     • Status of this bug can be tracked here, #1465123
+

Bugs addressed

+
 A total of 12 patches have been merged, addressing 11 bugs
+
+
  • #1486538: [geo-rep+qr]: Crashes observed at slave from qr_lookup_sbk during rename/hardlink/rebalance
  • #1486557: Log entry of files skipped/failed during rebalance operation
  • #1487033: rpc: client_t and related objects leaked due to incorrect ref counts
  • #1487319: afr: check op_ret value in __afr_selfheal_name_impunge
  • #1488119: scripts: mount.glusterfs contains non-portable bashisms
  • #1488168: Launch metadata heal in discover code path.
  • #1488387: gluster-blockd process crashed and core generated
  • #1488718: [RHHI] cannot boot vms created from template when disk format = qcow2
  • #1489260: Crash in dht_check_and_open_fd_on_subvol_task()
  • #1489296: glusterfsd (brick) process crashed
  • #1489511: return ENOSYS for 'non readable' FOPs
diff --git a/release-notes/3.12.10/index.html b/release-notes/3.12.10/index.html
new file mode 100644
index 00000000..4d49373b

Release notes for Gluster 3.12.10

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8 and 3.12.9 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-3.12.9 are listed below.

+
  • #1570475: Rebalance on few nodes doesn't seem to complete - stuck at FUTEX_WAIT
  • #1576816: GlusterFS can be improved
  • #1577164: gfapi: broken symbol versions
  • #1577845: Geo-rep: faulty session due to OSError: [Errno 95] Operation not supported
  • #1577862: [geo-rep]: Upgrade fails, session in FAULTY state
  • #1577868: Glusterd crashed on a few (master) nodes
  • #1577871: [geo-rep]: Geo-rep scheduler fails
  • #1580519: the regression test "tests/bugs/posix/bug-990028.t" fails
  • #1581746: bug-1309462.t is failing reliably due to changes in security.capability changes in the kernel
  • #1590133: xdata is leaking in server3_3_seek
diff --git a/release-notes/3.12.11/index.html b/release-notes/3.12.11/index.html
new file mode 100644
index 00000000..249e69a0

Release notes for Gluster 3.12.11

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, and 3.12.10 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+

This release contains a fix for a security vulnerability in Gluster, as follows:

  • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10841
  • https://nvd.nist.gov/vuln/detail/CVE-2018-10841
+

Installing the updated packages and restarting gluster services on gluster brick hosts will help prevent the security issue.

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-3.12.10 are listed below.

+
  • #1559829: snap/gcron.py: ABRT report for package glusterfs has reached 100 occurrences
  • #1591187: Gluster Block PVC fails to mount on Jenkins pod
  • #1593526: CVE-2018-10841 glusterfs: access trusted peer group via remote-host command [glusterfs upstream]
diff --git a/release-notes/3.12.12/index.html b/release-notes/3.12.12/index.html
new file mode 100644
index 00000000..8268f080

Release notes for Gluster 3.12.12

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, 3.12.10 and 3.12.11 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-3.12.11 are listed below.

+
  • #1579673: Remove EIO from the dht_inode_missing macro
  • #1595528: rmdir is leaking softlinks to directories in .glusterfs
  • #1597120: Add quorum checks in post-op
  • #1597123: Changes to self-heal logic w.r.t. detecting of split-brains
  • #1597154: When storage reserve limit is reached, appending data to an existing file throws EROFS error
  • #1597230: glustershd crashes when index heal is launched before graph is initialized.
  • #1598121: lookup not assigning gfid if file is not present in all bricks of replica
  • #1598720: afr: fix bug-1363721.t failure
  • #1599247: afr: don't update readables if inode refresh failed on all children
diff --git a/release-notes/3.12.13/index.html b/release-notes/3.12.13/index.html
new file mode 100644
index 00000000..4cf811f3

Release notes for Gluster 3.12.13

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, 3.12.10, 3.12.11 and 3.12.12 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed in release-3.12.13 are listed below.

+
  • #1599788: _is_prefix should return false for 0-length strings
  • #1603093: directories are invisible on client side
  • #1613512: Backport glusterfs-client memory leak fix to 3.12.x
  • #1618838: gluster bash completion leaks TOP=0 into the environment
  • #1618348: [Ganesha] Ganesha crashed in mdcache_alloc_and_check_handle while running bonnie and untars with parallel lookups
diff --git a/release-notes/3.12.14/index.html b/release-notes/3.12.14/index.html
new file mode 100644
index 00000000..22a724da

Release notes for Gluster 3.12.14

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, 3.12.10, 3.12.11, 3.12.12 and 3.12.13 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+
  1. This release contains fixes for the following security vulnerabilities:

     • https://nvd.nist.gov/vuln/detail/CVE-2018-10904
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10907
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10911
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10913
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10914
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10923
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10926
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10927
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10928
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10929
     • https://nvd.nist.gov/vuln/detail/CVE-2018-10930

  2. To resolve the security vulnerabilities, the following limitations were made in GlusterFS:

     • open, read, and write on special files like char and block devices are no longer permitted
     • the io-stats xlator can dump stat info only to the /var/run/gluster directory

  3. Addressed an issue that affected copying a file over SSL/TLS in a volume
+

Installing the updated packages and restarting gluster services on gluster brick hosts will fix the security issues.

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-3.12.13 are listed below.

+
  • #1622405: Problem with SSL/TLS encryption on Gluster 4.0 & 4.1
  • #1625286: Information Exposure in posix_get_file_contents function in posix-helpers.c
  • #1625648: I/O to arbitrary devices on storage server
  • #1625654: Stack-based buffer overflow in server-rpc-fops.c allows remote attackers to execute arbitrary code
  • #1625656: Improper deserialization in dict.c:dict_unserialize() can allow attackers to read arbitrary memory
  • #1625660: Unsanitized file names in debug/io-stats translator can allow remote attackers to execute arbitrary code
  • #1625664: Files can be renamed outside volume
diff --git a/release-notes/3.12.15/index.html b/release-notes/3.12.15/index.html
new file mode 100644
index 00000000..b17d1788

Release notes for Gluster 3.12.15

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, 3.12.10, 3.12.11, 3.12.12, 3.12.13 and 3.12.14 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-3.12.14 are listed below.

+
  • #1569336: Volume status inode is broken with brickmux
  • #1625588: Prevent hangs while increasing replica-count/replace-brick for directory hierarchy
  • #1497989: Gluster 3.12.1 Packages require manual systemctl daemon reload after install
  • #1512371: parallel-readdir = TRUE prevents directories listing
  • #1633625: split-brain observed on parent dir
  • #1637989: data-self-heal in arbiter volume results in stale locks.
diff --git a/release-notes/3.12.2/index.html b/release-notes/3.12.2/index.html
new file mode 100644
index 00000000..59d67c4d

Release notes for Gluster 3.12.2

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, and 3.12.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+
 1.) In a pure distribute volume there is no source from which to heal a replaced brick, and hence replacing a brick would cause a loss of the data that was present on it. The CLI has been enhanced to prevent a user from inadvertently using replace-brick in a pure distribute volume. It is advised to use add-brick/remove-brick to migrate data from an existing brick in a pure distribute volume.
+
+
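A sketch of the recommended migration flow (volume name and brick paths are placeholders), using add-brick followed by remove-brick with data migration:

# gluster volume add-brick <volname> <new-server>:<new-brick-path>
# gluster volume remove-brick <volname> <old-server>:<old-brick-path> start
# gluster volume remove-brick <volname> <old-server>:<old-brick-path> status
# gluster volume remove-brick <volname> <old-server>:<old-brick-path> commit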

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
     • The last known cause for corruption (#1465123) is still pending, and is not yet part of this release.

  2. Gluster volume restarts fail if the sub-directory export feature is in use. Status of this issue can be tracked here, #1501315

  3. Mounting a gluster snapshot will fail when attempting a FUSE based mount of the snapshot. For now, it is recommended to only access snapshots via the ".snaps" directory on a mounted gluster volume. Status of this issue can be tracked here, #1501378
+

Bugs addressed

+
 A total of 31 patches have been merged, addressing 28 bugs
+
+
    +
  • #1490493: Sub-directory mount details are incorrect in /proc/mounts
  • +
  • #1491178: GlusterD returns a bad memory pointer in glusterd_get_args_from_dict()
  • +
  • #1491292: Provide brick list as part of VOLUME_CREATE event.
  • +
  • #1491690: rpc: TLSv1_2_method() is deprecated in OpenSSL-1.1
  • +
  • #1492026: set the shard-block-size to 64MB in virt profile
  • +
  • #1492061: CLIENT_CONNECT event not being raised
  • +
  • #1492066: AFR_SUBVOL_UP and AFR_SUBVOLS_DOWN events not working
  • +
  • #1493975: disallow replace brick operation on plain distribute volume
  • +
  • #1494523: Spelling errors in 3.12.1
  • +
  • #1495162: glusterd ends up with multiple uuids for the same node
  • +
  • #1495397: Make event-history feature configurable and have it disabled by default
  • +
  • #1495858: gluster volume create asks for confirmation for replica-2 volume even with force
  • +
  • #1496238: [geo-rep]: Scheduler help needs correction for description of --no-color
  • +
  • #1496317: [afr] split-brain observed on T files post hardlink and rename in x3 volume
  • +
  • #1496326: [GNFS+EC] lock is being granted to 2 different client for the same data range at a time after performing lock acquire/release from the clients1
  • +
  • #1497084: glusterfs process consume huge memory on both server and client node
  • +
  • #1499123: Readdirp is considerably slower than readdir on acl clients
  • +
  • #1499150: Improve performance with xattrop update.
  • +
  • #1499158: client-io-threads option not working for replicated volumes
  • +
  • #1499202: self-heal daemon stuck
  • +
  • #1499392: [geo-rep]: Improve the output message to reflect the real failure with schedule_georep script
  • +
  • #1500396: [geo-rep]: Observed "Operation not supported" error with traceback on slave log
  • +
  • #1500472: Use a bitmap to store local node info instead of conf->local_nodeuuids[i].uuids
  • +
  • #1500662: gluster volume heal info "healed" and "heal-failed" showing wrong information
  • +
  • #1500835: [geo-rep]: Status shows ACTIVE for most workers in EC before it becomes the PASSIVE
  • +
  • #1500841: [geo-rep]: Worker crashes with OSError: [Errno 61] No data available
  • +
  • #1500845: [geo-rep] master worker crash with interrupted system call
  • +
  • #1500853: [geo-rep]: Incorrect last sync "0" during hystory crawl after upgrade/stop-start
  • +
  • #1501022: Make choose-local configurable through volume-set command
  • +
  • #1501154: Brick Multiplexing: Gluster volume start force complains with command "Error : Request timed out" when there are multiple volumes
  • +
diff --git a/release-notes/3.12.3/index.html b/release-notes/3.12.3/index.html
new file mode 100644
index 00000000..87b8e24d

Release notes for Gluster 3.12.3

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+
  1. Two regressions related to sub-directory mounts were fixed:
     - gluster volume restart failure (#1465123)
     - mounting a gluster snapshot via FUSE (#1501378)

  2. Improvements to the "help" command within the gluster CLI (#1509786)

  3. Introduction of a new API, glfs_fd_set_lkowner(), to set the lock owner

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are
       expanded or possibly contracted (i.e. add/remove bricks and rebalance),
       there are reports of VM images getting corrupted.
     • The last known cause for corruption (#1465123) is still pending, and is
       not yet part of this release.

Bugs addressed

+
 A total of 25 patches have been merged, addressing 25 bugs
+
+
    +
  • #1484489: File-level WORM allows mv over read-only files
  • +
  • #1494527: glusterfs fails to build twice in a row
  • +
  • #1499889: md-cache uses incorrect xattr keynames for GF_POSIX_ACL keys
  • +
  • #1499892: md-cache: xattr values should not be checked with string functions
  • +
  • #1501238: [SNAPSHOT] Unable to mount a snapshot on client
  • +
  • #1501315: Gluster Volume restart fail after exporting fuse sub-dir
  • +
  • #1501864: Add generated HMAC token in header for webhook calls
  • +
  • #1501956: gfapi: API needed to set lk_owner
  • +
  • #1502104: [geo-rep]: RSYNC throwing internal errors
  • +
  • #1503239: [Glusterd] Volume operations fail on a (tiered) volume because of a stale lock held by one of the nodes
  • +
  • #1505221: glusterfs client crash when removing directories
  • +
  • #1505323: When sub-dir is mounted on Fuse client,adding bricks to the same volume unmounts the subdir from fuse client
  • +
  • #1505370: Mishandling null check at send_brick_req of glusterfsd/src/gf_attach.c
  • +
  • #1505373: server.allow-insecure should be visible in "gluster volume set help"
  • +
  • #1505527: Posix compliance rename test fails on fuse subdir mount
  • +
  • #1505846: [GSS] gluster volume status command is missing in man page
  • +
  • #1505856: Potential use of NULL this variable before it gets initialized
  • +
  • #1507747: clean up port map on brick disconnect
  • +
  • #1507748: Brick port mismatch
  • +
  • #1507877: reset-brick commit force failed with glusterd_volume_brickinfo_get Returning -1
  • +
  • #1508283: stale brick processes getting created and volume status shows brick as down(pkill glusterfsd glusterfs ,glusterd restart)
  • +
  • #1509200: Event webhook should work with HTTPS urls
  • +
  • #1509786: The output of the "gluster help" command is difficult to read
  • +
  • #1511271: Rebalance estimate(ETA) shows wrong details(as intial message of 10min wait reappears) when still in progress
  • +
  • #1511301: In distribute volume after glusterd restart, brick goes offline
  • +
\ No newline at end of file
diff --git a/release-notes/3.12.4/index.html b/release-notes/3.12.4/index.html
new file mode 100644
index 00000000..ba5af8eb
--- /dev/null
+++ b/release-notes/3.12.4/index.html
@@ -0,0 +1,4530 @@
3.12.4 - Gluster Docs

Release notes for Gluster 3.12.4

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are
       expanded or possibly contracted (i.e. add/remove bricks and rebalance),
       there are reports of VM images getting corrupted.
     • The last known cause for corruption (#1465123) is still pending, and is
       not yet part of this release.

Bugs addressed

+
 A total of 13 patches have been merged, addressing 12 bugs
+
+
    +
  • #1478411: Directory listings on fuse mount are very slow due to small number of getdents() entries
  • +
  • #1511782: In Replica volume 2*2 when quorum is set, after glusterd restart nfs server is coming up instead of self-heal daemon
  • +
  • #1512432: Test bug-1483058-replace-brick-quorum-validation.t fails inconsistently
  • +
  • #1513258: NetBSD port
  • +
  • #1514380: default timeout of 5min not honored for analyzing split-brain files post setfattr replica.split-brain-heal-finalize
  • +
  • #1514420: gluster volume splitbrain info needs to display output of each brick in a stream fashion instead of buffering and dumping at the end
  • +
  • #1515042: bug-1247563.t is failing on master
  • +
  • #1516691: Rebalance fails on NetBSD because fallocate is not implemented
  • +
  • #1517689: Memory leak in locks xlator
  • +
  • #1518061: Remove 'summary' option from 'gluster vol heal..' CLI
  • +
  • #1523048: glusterd consuming high memory
  • +
  • #1523455: Store allocated objects in the mem_acct
  • +
\ No newline at end of file
diff --git a/release-notes/3.12.5/index.html b/release-notes/3.12.5/index.html
new file mode 100644
index 00000000..fdab91d8
--- /dev/null
+++ b/release-notes/3.12.5/index.html
@@ -0,0 +1,4528 @@
3.12.5 - Gluster Docs

Release notes for Gluster 3.12.5

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are
       expanded or possibly contracted (i.e. add/remove bricks and rebalance),
       there are reports of VM images getting corrupted.
     • The last known cause for corruption (#1465123) is still pending, and is
       not yet part of this release.

Bugs addressed

+
 A total of 12 patches have been merged, addressing 11 bugs
+
+
    +
  • #1489043: The number of bytes of the quota specified in version 3.7 or later is incorrect
  • +
  • #1511301: In distribute volume after glusterd restart, brick goes offline
  • +
  • #1525850: rdma transport may access an obsolete item in gf_rdma_device_t->all_mr, and causes glusterfsd/glusterfs process crash.
  • +
  • #1527276: feature/bitrot: remove internal xattrs from lookup cbk
  • +
  • #1529085: fstat returns ENOENT/ESTALE
  • +
  • #1529088: opening a file that is destination of rename results in ENOENT errors
  • +
  • #1529095: /usr/sbin/glusterfs crashing on Red Hat OpenShift Container Platform node
  • +
  • #1529539: JWT support without external dependency
  • +
  • #1530448: glustershd fails to start on a volume force start after a brick is down
  • +
  • #1530455: Files are not rebalanced if destination brick(available size) is of smaller size than source brick(available size)
  • +
  • #1531372: Use after free in cli_cmd_volume_create_cbk
  • +
\ No newline at end of file
diff --git a/release-notes/3.12.6/index.html b/release-notes/3.12.6/index.html
new file mode 100644
index 00000000..07db4273
--- /dev/null
+++ b/release-notes/3.12.6/index.html
@@ -0,0 +1,4533 @@
3.12.6 - Gluster Docs

Release notes for Gluster 3.12.6

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are
       expanded or possibly contracted (i.e. add/remove bricks and rebalance),
       there are reports of VM images getting corrupted.
     • The last known cause for corruption (#1465123) is still pending, and is
       not yet part of this release.

Bugs addressed

+
 A total of 16 patches have been merged, addressing 16 bugs
+
+
    +
  • #1510342: Not all files synced using geo-replication
  • +
  • #1533269: Random GlusterFSD process dies during rebalance
  • +
  • #1534847: entries not getting cleared post healing of softlinks (stale entries showing up in heal info)
  • +
  • #1536334: [Disperse] Implement open fd heal for disperse volume
  • +
  • #1537346: glustershd/glusterd is not using right port when connecting to glusterfsd process
  • +
  • #1539516: DHT log messages: Found anomalies in (null) (gfid = 00000000-0000-0000-0000-000000000000). Holes=1 overlaps=0
  • +
  • #1540224: dht_(f)xattrop does not implement migration checks
  • +
  • #1541267: dht_layout_t leak in dht_populate_inode_for_dentry
  • +
  • #1541930: A down brick is incorrectly considered to be online and makes the volume to be started without any brick available
  • +
  • #1542054: tests/bugs/cli/bug-1169302.t fails spuriously
  • +
  • #1542475: Random failures in tests/bugs/nfs/bug-974972.t
  • +
  • #1542601: The used space in the volume increases when the volume is expanded
  • +
  • #1542615: tests/bugs/core/multiplex-limit-issue-151.t fails sometimes in upstream master
  • +
  • #1542826: Mark tests/bugs/posix/bug-990028.t bad on release-3.12
  • +
  • #1542934: Seeing timer errors in the rebalance logs
  • +
  • #1543016: dht_lookup_unlink_of_false_linkto_cbk fails with "Permission denied"
  • +
  • #1544637: 3.8 -> 3.10 rolling upgrade fails (same for 3.12 or 3.13) on Ubuntu 14
  • +
\ No newline at end of file
diff --git a/release-notes/3.12.7/index.html b/release-notes/3.12.7/index.html
new file mode 100644
index 00000000..fd57d377
--- /dev/null
+++ b/release-notes/3.12.7/index.html
@@ -0,0 +1,4513 @@
3.12.7 - Gluster Docs

Release notes for Gluster 3.12.7

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major issues

  1. With brick multiplexing enabled, if one of the nodes in the gluster cluster
     goes down and volume operations are performed in the meantime, the brick
     processes on that node will fail to come up when the node comes back. The
     issue is tracked in #1543708 and will be fixed in the next release.

Bugs addressed

A total of 8 patches have been merged, addressing 8 bugs

  • #1517260: Volume wrong size
  • #1543709: Optimize glusterd_import_friend_volume code path
  • #1544635: Though files are in split-brain able to perform writes to the file
  • #1547841: Typo error in __dht_check_free_space function log message
  • #1548078: [Rebalance] "Migrate file failed: : failed to get xattr [No data available]" warnings in rebalance logs
  • #1548270: DHT calls dht_lookup_everywhere for 1xn volumes
  • #1549505: Backport patch to reduce duplicate code in server-rpc-fops.c
\ No newline at end of file
diff --git a/release-notes/3.12.8/index.html b/release-notes/3.12.8/index.html
new file mode 100644
index 00000000..3142071c
--- /dev/null
+++ b/release-notes/3.12.8/index.html
@@ -0,0 +1,4498 @@
3.12.8 - Gluster Docs

Release notes for Gluster 3.12.8

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Bugs addressed

A total of 9 patches have been merged, addressing 9 bugs

  • #1543708: glusterd fails to attach brick during restart of the node
  • #1546627: Syntactical errors in hook scripts for managing SELinux context on bricks
  • #1549473: possible memleak in glusterfsd process with brick multiplexing on
  • #1555161: [Rebalance] ENOSPC errors on few files in rebalance logs
  • #1555201: After a replace brick command, self-heal takes some time to start healing files on disperse volumes
  • #1558352: [EC] Read performance of EC volume exported over gNFS is significantly lower than write performance
  • #1561731: Rebalance failures on a dispersed volume with lookup-optimize enabled
  • #1562723: SHD is not healing entries in halo replication
  • #1565590: timer: Possible race condition between gftimer* routines
\ No newline at end of file
diff --git a/release-notes/3.12.9/index.html b/release-notes/3.12.9/index.html
new file mode 100644
index 00000000..a3552982
--- /dev/null
+++ b/release-notes/3.12.9/index.html
@@ -0,0 +1,4536 @@
3.12.9 - Gluster Docs

Release notes for Gluster 3.12.9

+

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, and 3.12.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

+

Major changes, features and limitations addressed in this release

+

This release contains a fix for a security vulnerability in Gluster, as follows:

+
  • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1088
  • https://nvd.nist.gov/vuln/detail/CVE-2018-1088

Installing the updated packages and restarting gluster services will update the Gluster shared storage volume volfiles, which are more secure than the defaults currently in place.

+

Further, for increased security, the Gluster shared storage volume can be TLS enabled, and access to it restricted using the auth.ssl-allow option. See this guide for more details; a brief sketch follows below.

+
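As an illustrative sketch of the above (not part of the upstream notes; the hostnames are placeholders and the TLS certificates are assumed to be already provisioned as described in the SSL/TLS administration guide), the shared storage volume can be restricted as follows:

    # enable TLS on the I/O path of the shared storage volume
    gluster volume set gluster_shared_storage client.ssl on
    gluster volume set gluster_shared_storage server.ssl on
    # allow only the certificate identities of the trusted pool members
    gluster volume set gluster_shared_storage auth.ssl-allow 'server1.example.com,server2.example.com,server3.example.com'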

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-3.12.8 are listed below.

+
  • #1566131: Bringing down data bricks in cyclic order results in arbiter brick becoming the source for heal.
  • #1566820: [Remove-brick] Many files were not migrated from the decommissioned bricks; commit results in data loss
  • #1569407: EIO errors on some operations when volume has mixed brick versions on a disperse volume
  • #1570430: CVE-2018-1088 glusterfs: Privilege escalation via gluster_shared_storage when snapshot scheduling is enabled [fedora-all]
\ No newline at end of file
diff --git a/release-notes/3.13.0/index.html b/release-notes/3.13.0/index.html
new file mode 100644
index 00000000..48ea2f7d
--- /dev/null
+++ b/release-notes/3.13.0/index.html
@@ -0,0 +1,5059 @@
3.13.0 - Gluster Docs

Release notes for Gluster 3.13.0

+

This is a major release that includes a range of features enhancing usability, enhancements to GFAPI for developers, and a set of bug fixes.

+

The most notable features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

+

Major changes and features

+

Addition of summary option to the heal info CLI

+

Notes for users: The Gluster heal info CLI now has a 'summary' option displaying the statistics of entries pending heal, in split-brain, and currently being healed, per brick.

+

Usage:

+
# gluster volume heal <volname> info summary
+
+

Sample output:

+
Brick <brickname>
Status: Connected
Total Number of entries: 3
Number of entries in heal pending: 2
Number of entries in split-brain: 1
Number of entries possibly healing: 0

Brick <brickname>
Status: Connected
Total Number of entries: 4
Number of entries in heal pending: 3
Number of entries in split-brain: 1
Number of entries possibly healing: 0

Using the --xml option with the CLI results in the output in XML format.

+

NOTE: Summary information is obtained in a similar fashion to detailed information; the time taken for the command to complete would therefore still be the same, and not faster.

+

Addition of checks for allowing lookups in AFR and removal of 'cluster.quorum-reads' volume option.

+

Notes for users:

+

Previously, AFR has never failed lookup unless there is a gfid mismatch. This behavior is being changed with this release, as a part of fixing Bug#1515572.

+

Lookups in replica-3 and arbiter volumes will now succeed only if there is quorum and there is a good copy of a file, i.e. the lookup has to succeed on a quorum of bricks and at least one of them has to be a good copy. If these conditions are not met, the operation will fail with the ENOTCONN error.

+

As a part of this change the cluster.quorum-reads volume option is removed, as lookup failure will result in all subsequent operations (including reads) failing, which makes this option redundant.

+

Ensuring this strictness also helps prevent a long-standing rename-leading-to-dataloss Bug#1366818, by disallowing lookups (and thus renames) when a good copy is not available.

+

Note: These checks do not affect replica 2 volumes, where lookups work as before, even when only 1 brick is online.

+

Further reference: mailing list discussions on topic

+

Support for max-port range in glusterd.vol

+

Notes for users:

+

The glusterd configuration provides an option to control the number of ports that can be used by gluster daemons on a node.

+

The option is named "max-port" and can be set in the glusterd.vol file per node to the desired maximum, as sketched below.

+
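For illustration only, a minimal sketch of such a glusterd.vol entry is shown below; the port value is an example and the surrounding options reflect common defaults rather than anything mandated by this release:

    # /etc/glusterfs/glusterd.vol (set per node)
    volume management
        type mgmt/glusterd
        option working-directory /var/lib/glusterd
        option transport-type socket
        # cap the ports handed out to brick and daemon processes
        option max-port 60999
    end-volume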

Prevention of other processes accessing the mounted brick snapshots

+

Notes for users: Snapshots of gluster bricks are now mounted only when the snapshot is active, or when they are restored. Prior to this, snapshots of gluster volumes were mounted by default across the entire life-cycle of the snapshot.

+

This behavior is transparent to users and managed by the gluster processes.

+

Enabling thin client

+

Notes for users: The Gluster client stack encompasses the cluster translators (like distribution and replication or disperse). This is in addition to the usual caching translators on the client stack. In certain cases this makes the client footprint larger than sustainable and also incurs frequent client updates.

+

The thin client feature moves the clustering translators (like distribute and other translators below it) and a few caching translators to a managed protocol endpoint (called gfproxy) on the gluster server nodes, thus thinning the client stack.

+

Usage:

+
# gluster volume set <volname> config.gfproxyd enable
+
+

The above enables the gfproxy protocol service on the server nodes. To mount a client that interacts with this endpoint, use the --thin-client mount option.

+

Example:

+
# glusterfs --thin-client --volfile-id=<volname> --volfile-server=<host> <mountpoint>
+
+

Limitations: This feature is a technical preview in the 3.13.0 release, and will be improved in the upcoming releases.

+

Ability to reserve back-end storage space

+

Notes for users: The posix translator is enhanced with an option that enables reserving disk space on the bricks. This reserved space is not used by the client mounts, thus preventing disk full scenarios, as disk expansion or cluster expansion is more tedious to achieve when back-end bricks are full.

+

When the bricks have free space equal to or less than the reserved space, mount points using the brick will get ENOSPC errors.

+

The default value for the option is 1(%) of the brick size. If set to 0(%) this feature is disabled. The option takes a numeric percentage value that reserves up to that percentage of disk space.

+

Usage:

+
# gluster volume set <volname> storage.reserve <number>
+
+

List all the connected clients for a brick and also exported bricks/snapshots from each brick process

+

Notes for users: The Gluster CLI is enhanced with an option to list all clients connected to a volume (or all volumes) and also the list of exported bricks and snapshots for the volume.

+

Usage:

+
# gluster volume status <volname/all> client-list
+
+

Improved write performance with Disperse xlator

+

Notes for users: The disperse translator has been enhanced to support parallel writes, which improves the performance of write operations when using disperse volumes.

+

This feature is enabled by default, and can be toggled using the boolean option 'disperse.parallel-writes'; see the sketch below.

+
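A minimal sketch of toggling the option (the volume name is a placeholder):

    # disable parallel writes on a disperse volume, then re-enable them
    gluster volume set <volname> disperse.parallel-writes off
    gluster volume set <volname> disperse.parallel-writes on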

Disperse xlator now supports discard operations

+

Notes for users: This feature enables users to punch holes in files created on disperse volumes.

+

Usage:

+
# fallocate  -p -o <offset> -l <len> <file_name>
+
+

Included details about memory pools in statedumps

+

Notes for users: For troubleshooting purposes it sometimes is useful to verify the memory allocations done by Gluster. A previous release of Gluster included a rewrite of the memory pool internals. Since these changes, statedumps did not include details about the memory pools anymore.

+

This version of Gluster adds details about the used memory pools in the statedump. Troubleshooting memory consumption problems is much more efficient again.

+

Limitations: There are currently no statistics included in the statedump about the actual behavior of the memory pools. This means that the efficiency of the memory pools cannot be verified.

+

Gluster APIs added to register callback functions for upcalls

+

Notes for developers: New APIs have been added to allow gfapi applications to register and unregister for upcall events. Along with the list of events they are interested in, applications now have to register a callback function. This routine shall be invoked asynchronously, in gluster thread context, in case of any upcalls sent by the backend server.

+
int glfs_upcall_register (struct glfs *fs, uint32_t event_list,
                          glfs_upcall_cbk cbk, void *data);
int glfs_upcall_unregister (struct glfs *fs, uint32_t event_list);
+

The libgfapi header files include the complete synopsis of these APIs' definitions and their usage.

+

Limitations: An application can register only a single callback function for all the upcall events it is interested in.

+

Known Issues: Bug#1515748 GlusterFS server should be able to identify the clients which registered for upcalls and notify only those clients in case of such events.

+

Gluster API added with a glfs_mem_header for exported memory

+

Notes for developers: Memory allocations done in libgfapi that return a structure to the calling application should use GLFS_CALLOC() and friends. Applications can then correctly free the memory by calling glfs_free().

+

This is implemented with a new glfs_mem_header similar to how the memory allocations are done with GF_CALLOC() etc. The new header includes a release() function pointer that gets called to free the resource when the application calls glfs_free().

+

The change is a major improvement for allocating and freeing resources in a standardized way that is transparent to the libgfapi applications.

+

Provided a new xlator to delay fops, to aid slow brick response simulation and debugging

+

Notes for developers: Like the error-gen translator, a new translator that introduces delays for FOPs has been added to the code base. This can help determine issues around slow(er) client responses and enable better qualification of the translator stacks.

+

For usage refer to this test case.

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are
       expanded or possibly contracted (i.e. add/remove bricks and rebalance),
       there are reports of VM images getting corrupted.
     • The last known cause for corruption (Bug #1515434) has a fix with this
       release. As further testing is still in progress, the issue is retained
       as a major issue.
     • Status of this bug can be tracked here, #1515434

Bugs addressed

+

Bugs addressed since release-3.12.0 are listed below.

+
    +
  • #1248393: DHT: readdirp fails to read some directories.
  • +
  • #1258561: Gluster puts PID files in wrong place
  • +
  • #1261463: AFR : [RFE] Improvements needed in "gluster volume heal info" commands
  • +
  • #1294051: Though files are in split-brain able to perform writes to the file
  • +
  • #1328994: When a feature fails needing a higher opversion, the message should state what version it needs.
  • +
  • #1335251: mgmt/glusterd: clang compile warnings in glusterd-snapshot.c
  • +
  • #1350406: [storage/posix] - posix_do_futimes function not implemented
  • +
  • #1365683: Fix crash bug when mnt3_resolve_subdir_cbk fails
  • +
  • #1371806: DHT :- inconsistent 'custom extended attributes',uid and gid, Access permission (for directories) if User set/modifies it after bringing one or more sub-volume down
  • +
  • #1376326: separating attach tier and add brick
  • +
  • #1388509: gluster volume heal info "healed" and "heal-failed" showing wrong information
  • +
  • #1395492: trace/error-gen be turned on together while use 'volume set' command to set one of them
  • +
  • #1396327: gluster core dump due to assert failed GF_ASSERT (brick_index < wordcount);
  • +
  • #1406898: Need build time option to default to IPv6
  • +
  • #1428063: gfproxy: Introduce new server-side daemon called GFProxy
  • +
  • #1432046: symlinks trigger faulty geo-replication state (rsnapshot usecase)
  • +
  • #1443145: Free runtime allocated resources upon graph switch or glfs_fini()
  • +
  • #1445663: Improve performance with xattrop update.
  • +
  • #1451434: Use a bitmap to store local node info instead of conf->local_nodeuuids[i].uuids
  • +
  • #1454590: run.c demo mode broken
  • +
  • #1457985: Rebalance estimate time sometimes shows negative values
  • +
  • #1460514: [Ganesha] : Ganesha crashes while cluster enters failover/failback mode
  • +
  • #1461018: Implement DISCARD FOP for EC
  • +
  • #1462969: Peer-file parsing is too fragile
  • +
  • #1467209: [Scale] : Rebalance ETA shows the initial estimate to be ~140 days,finishes within 18 hours though.
  • +
  • #1467614: Gluster read/write performance improvements on NVMe backend
  • +
  • #1468291: NFS Sub directory is getting mounted on solaris 10 even when the permission is restricted in nfs.export-dir volume option
  • +
  • #1471366: Posix xlator needs to reserve disk space to prevent the brick from getting full.
  • +
  • #1472267: glusterd fails to start
  • +
  • #1472609: Root path xattr does not heal correctly in certain cases when volume is in stopped state
  • +
  • #1472758: Running sysbench on vm disk from plain distribute gluster volume causes disk corruption
  • +
  • #1472961: [GNFS+EC] lock is being granted to 2 different client for the same data range at a time after performing lock acquire/release from the clients1
  • +
  • #1473026: replace-brick failure leaves glusterd in inconsistent state
  • +
  • #1473636: Launch metadata heal in discover code path.
  • +
  • #1474180: [Scale] : Client logs flooded with "inode context is NULL" error messages
  • +
  • #1474190: cassandra fails on gluster-block with both replicate and ec volumes
  • +
  • #1474309: Disperse: Coverity issue
  • +
  • #1474318: dht remove-brick status does not indicate failures files not migrated because of a lack of space
  • +
  • #1474639: [Scale] : Rebalance Logs are bulky.
  • +
  • #1475255: [Geo-rep]: Geo-rep hangs in changelog mode
  • +
  • #1475282: [Remove-brick] Few files are getting migrated eventhough the bricks crossed cluster.min-free-disk value
  • +
  • #1475300: implementation of fallocate call in read-only xlator
  • +
  • #1475308: [geo-rep]: few of the self healed hardlinks on master did not sync to slave
  • +
  • #1475605: gluster-block default shard-size should be 64MB
  • +
  • #1475632: Brick Multiplexing: Brick process crashed at changetimerecorder(ctr) translator when restarting volumes
  • +
  • #1476205: [EC]: md5sum mismatches every time for a file from the fuse client on EC volume
  • +
  • #1476295: md-cache uses incorrect xattr keynames for GF_POSIX_ACL keys
  • +
  • #1476324: md-cache: xattr values should not be checked with string functions
  • +
  • #1476410: glusterd: code lacks clarity of logic in glusterd_get_quorum_cluster_counts()
  • +
  • #1476665: [Perf] : Large file sequential reads are off target by ~38% on FUSE/Ganesha
  • +
  • #1476668: [Disperse] : Improve heal info command to handle obvious cases
  • +
  • #1476719: glusterd: flow in glusterd_validate_quorum() could be streamlined
  • +
  • #1476785: scripts: invalid test in S32gluster_enable_shared_storage.sh
  • +
  • #1476861: packaging: /var/lib/glusterd/options should be %config(noreplace)
  • +
  • #1476957: peer-parsing.t fails on NetBSD
  • +
  • #1477169: AFR entry self heal removes a directory's .glusterfs symlink.
  • +
  • #1477404: eager-lock should be off for cassandra to work at the moment
  • +
  • #1477488: Permission denied errors when appending files after readdir
  • +
  • #1478297: Add NULL gfid checks before creating file
  • +
  • #1478710: when gluster pod is restarted, bricks from the restarted pod fails to connect to fuse, self-heal etc
  • +
  • #1479030: nfs process crashed in "nfs3_getattr"
  • +
  • #1480099: More useful error - replace 'not optimal'
  • +
  • #1480445: Log entry of files skipped/failed during rebalance operation
  • +
  • #1480525: Make choose-local configurable through volume-set command
  • +
  • #1480591: [Scale] : I/O errors on multiple gNFS mounts with "Stale file handle" during rebalance of an erasure coded volume.
  • +
  • #1481199: mempool: run-time crash when built with --disable-mempool
  • +
  • #1481600: rpc: client_t and related objects leaked due to incorrect ref counts
  • +
  • #1482023: snpashots issues with other processes accessing the mounted brick snapshots
  • +
  • #1482344: Negative Test: glusterd crashes for some of the volume options if set at cluster level
  • +
  • #1482906: /var/lib/glusterd/peers File had a blank line, Stopped Glusterd from starting
  • +
  • #1482923: afr: check op_ret value in __afr_selfheal_name_impunge
  • +
  • #1483058: [quorum]: Replace brick is happened when Quorum not met.
  • +
  • #1483995: packaging: use rdma-core(-devel) instead of ibverbs, rdmacm; disable rdma on armv7hl
  • +
  • #1484215: Add Deepshika has CI Peer
  • +
  • #1484225: [rpc]: EPOLLERR - disconnecting now messages every 3 secs after completing rebalance
  • +
  • #1484246: [PATCH] incorrect xattr list handling on FreeBSD
  • +
  • #1484490: File-level WORM allows mv over read-only files
  • +
  • #1484709: [geo-rep+qr]: Crashes observed at slave from qr_lookup_sbk during rename/hardlink/rebalance cases
  • +
  • #1484722: return ENOSYS for 'non readable' FOPs
  • +
  • #1485962: gluster-block profile needs to have strict-o-direct
  • +
  • #1486134: glusterfsd (brick) process crashed
  • +
  • #1487644: Fix reference to readthedocs.io in source code and elsewhere
  • +
  • #1487830: scripts: mount.glusterfs contains non-portable bashisms
  • +
  • #1487840: glusterd: spelling errors reported by Debian maintainer
  • +
  • #1488354: gluster-blockd process crashed and core generated
  • +
  • #1488399: Crash in dht_check_and_open_fd_on_subvol_task()
  • +
  • #1488546: [RHHI] cannot boot vms created from template when disk format = qcow2
  • +
  • #1488808: Warning on FreeBSD regarding -Wformat-extra-args
  • +
  • #1488829: Fix unused variable when TCP_USER_TIMEOUT is undefined
  • +
  • #1488840: Fix guard define on nl-cache
  • +
  • #1488906: Fix clagn/gcc warning for umountd
  • +
  • #1488909: Fix the type of 'len' in posix.c, clang is showing a warning
  • +
  • #1488913: Sub-directory mount details are incorrect in /proc/mounts
  • +
  • #1489432: disallow replace brick operation on plain distribute volume
  • +
  • #1489823: set the shard-block-size to 64MB in virt profile
  • +
  • #1490642: glusterfs client crash when removing directories
  • +
  • #1490897: GlusterD returns a bad memory pointer in glusterd_get_args_from_dict()
  • +
  • #1491025: rpc: TLSv1_2_method() is deprecated in OpenSSL-1.1
  • +
  • #1491670: [afr] split-brain observed on T files post hardlink and rename in x3 volume
  • +
  • #1492109: Provide brick list as part of VOLUME_CREATE event.
  • +
  • #1492542: Gluster v status client-list prints wrong output for multiplexed volume.
  • +
  • #1492849: xlator/tier: flood of -Wformat-truncation warnings with gcc-7.
  • +
  • #1492851: xlator/bitrot: flood of -Wformat-truncation warnings with gcc-7.
  • +
  • #1492968: CLIENT_CONNECT event is not notified by eventsapi
  • +
  • #1492996: Readdirp is considerably slower than readdir on acl clients
  • +
  • #1493133: GlusterFS failed to build while running make
  • +
  • #1493415: self-heal daemon stuck
  • +
  • #1493539: AFR_SUBVOL_UP and AFR_SUBVOLS_DOWN events not working
  • +
  • #1493893: gluster volume asks for confirmation for disperse volume even with force
  • +
  • #1493967: glusterd ends up with multiple uuids for the same node
  • +
  • #1495384: Gluster 3.12.1 Packages require manual systemctl daemon reload after install
  • +
  • #1495436: [geo-rep]: Scheduler help needs correction for description of --no-color
  • +
  • #1496363: Add generated HMAC token in header for webhook calls
  • +
  • #1496379: glusterfs process consume huge memory on both server and client node
  • +
  • #1496675: Verify pool pointer before destroying it
  • +
  • #1498570: client-io-threads option not working for replicated volumes
  • +
  • #1499004: [Glusterd] Volume operations fail on a (tiered) volume because of a stale lock held by one of the nodes
  • +
  • #1499159: [geo-rep]: Improve the output message to reflect the real failure with schedule_georep script
  • +
  • #1499180: [geo-rep]: Observed "Operation not supported" error with traceback on slave log
  • +
  • #1499391: [geo-rep]: Worker crashes with OSError: [Errno 61] No data available
  • +
  • #1499393: [geo-rep] master worker crash with interrupted system call
  • +
  • #1499509: Brick Multiplexing: Gluster volume start force complains with command "Error : Request timed out" when there are multiple volumes
  • +
  • #1499641: gfapi: API needed to set lk_owner
  • +
  • #1499663: Mark test case ./tests/bugs/bug-1371806_1.t as a bad test case.
  • +
  • #1499933: md-cache: Add additional samba and macOS specific EAs to mdcache
  • +
  • #1500269: opening a file that is destination of rename results in ENOENT errors
  • +
  • #1500284: [geo-rep]: Status shows ACTIVE for most workers in EC before it becomes the PASSIVE
  • +
  • #1500346: [geo-rep]: Incorrect last sync "0" during hystory crawl after upgrade/stop-start
  • +
  • #1500433: [geo-rep]: RSYNC throwing internal errors
  • +
  • #1500649: Shellcheck errors in hook scripts
  • +
  • #1501235: [SNAPSHOT] Unable to mount a snapshot on client
  • +
  • #1501317: glusterfs fails to build twice in a row
  • +
  • #1501390: Intermittent failure in tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t on NetBSD
  • +
  • #1502253: snapshot_scheduler crashes when SELinux is absent on the system
  • +
  • #1503246: clean up port map on brick disconnect
  • +
  • #1503394: Mishandling null check at send_brick_req of glusterfsd/src/gf_attach.c
  • +
  • #1503424: server.allow-insecure should be visible in "gluster volume set help"
  • +
  • #1503510: [BitRot] man page of gluster needs to be updated for scrub-frequency
  • +
  • #1503519: default timeout of 5min not honored for analyzing split-brain files post setfattr replica.split-brain-heal-finalize
  • +
  • #1503983: Wrong usage of getopt shell command in hook-scripts
  • +
  • #1505253: Update .t test files to use the new tier commands
  • +
  • #1505323: When sub-dir is mounted on Fuse client,adding bricks to the same volume unmounts the subdir from fuse client
  • +
  • #1505325: Potential use of NULL this variable before it gets initialized
  • +
  • #1505527: Posix compliance rename test fails on fuse subdir mount
  • +
  • #1505663: [GSS] gluster volume status command is missing in man page
  • +
  • #1505807: files are appendable on file-based worm volume
  • +
  • #1506083: Ignore disk space reserve check for internal FOPS
  • +
  • #1506513: stale brick processes getting created and volume status shows brick as down(pkill glusterfsd glusterfs ,glusterd restart)
  • +
  • #1506589: Brick port mismatch
  • +
  • #1506903: Event webhook should work with HTTPS urls
  • +
  • #1507466: reset-brick commit force failed with glusterd_volume_brickinfo_get Returning -1
  • +
  • #1508898: Add new configuration option to manage deletion of Worm files
  • +
  • #1509789: The output of the "gluster help" command is difficult to read
  • +
  • #1510012: GlusterFS 3.13.0 tracker
  • +
  • #1510019: Change default versions of certain features to 3.13 from 4.0
  • +
  • #1510022: Revert experimental and 4.0 features to prepare for 3.13 release
  • +
  • #1511274: Rebalance estimate(ETA) shows wrong details(as intial message of 10min wait reappears) when still in progress
  • +
  • #1511293: In distribute volume after glusterd restart, brick goes offline
  • +
  • #1511768: In Replica volume 2*2 when quorum is set, after glusterd restart nfs server is coming up instead of self-heal daemon
  • +
  • #1512435: Test bug-1483058-replace-brick-quorum-validation.t fails inconsistently
  • +
  • #1512460: disperse eager-lock degrades performance for file create workloads
  • +
  • #1513259: NetBSD port
  • +
  • #1514419: gluster volume splitbrain info needs to display output of each brick in a stream fashion instead of buffering and dumping at the end
  • +
  • #1515045: bug-1247563.t is failing on master
  • +
  • #1515572: Accessing a file when source brick is down results in that FOP being hung
  • +
  • #1516313: Bringing down data bricks in cyclic order results in arbiter brick becoming the source for heal.
  • +
  • #1517692: Memory leak in locks xlator
  • +
  • #1518257: EC DISCARD doesn't punch hole properly
  • +
  • #1518512: Change GD_OP_VERSION to 3_13_0 from 3_12_0 for RFE https://bugzilla.redhat.com/show_bug.cgi?id=1464350
  • +
  • #1518744: Add release notes about DISCARD on EC volume
  • +
\ No newline at end of file
diff --git a/release-notes/3.13.1/index.html b/release-notes/3.13.1/index.html
new file mode 100644
index 00000000..8a3dd581
--- /dev/null
+++ b/release-notes/3.13.1/index.html
@@ -0,0 +1,4540 @@
3.13.1 - Gluster Docs

Release notes for Gluster 3.13.1

+

This is a bugfix release. The release notes for 3.13.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.13 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+
  1. Expanding a gluster volume that is sharded may cause file corruption

     • Sharded volumes are typically used for VM images; if such volumes are
       expanded or possibly contracted (i.e. add/remove bricks and rebalance),
       there are reports of VM images getting corrupted.
     • The last known cause for corruption (Bug #1515434) is still under review.
     • Status of this bug can be tracked here, #1515434

Bugs addressed

+

Bugs addressed since release-3.13.0 are listed below.

+
  • #1428060: write-behind: Allow trickling-writes to be configurable, fix usage of page_size and window_size
  • #1520232: Rebalance fails on NetBSD because fallocate is not implemented
  • #1522710: Directory listings on fuse mount are very slow due to small number of getdents() entries
  • #1523046: glusterd consuming high memory
  • #1523456: Store allocated objects in the mem_acct
  • #1527275: feature/bitrot: remove internal xattrs from lookup cbk
  • #1527699: rdma transport may access an obsolete item in gf_rdma_device_t->all_mr, and causes glusterfsd/glusterfs process crash.
\ No newline at end of file
diff --git a/release-notes/3.13.2/index.html b/release-notes/3.13.2/index.html
new file mode 100644
index 00000000..c7a34b9d
--- /dev/null
+++ b/release-notes/3.13.2/index.html
@@ -0,0 +1,4534 @@
3.13.2 - Gluster Docs

Release notes for Gluster 3.13.2

+

This is a bugfix release. The release notes for 3.13.0 and 3.13.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.13 stable release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+

No Major issues

+

Bugs addressed

+

Bugs addressed since release-3.13.1 are listed below.

+
    +
  • #1511293: In distribute volume after glusterd restart, brick goes offline
  • +
  • #1515434: dht_(f)xattrop does not implement migration checks
  • +
  • #1516313: Bringing down data bricks in cyclic order results in arbiter brick becoming the source for heal.
  • +
  • #1529055: Test case ./tests/bugs/bug-1371806_1.t is failing
  • +
  • #1529084: fstat returns ENOENT/ESTALE
  • +
  • #1529094: /usr/sbin/glusterfs crashing on Red Hat OpenShift Container Platform node
  • +
  • #1530449: glustershd fails to start on a volume force start after a brick is down
  • +
  • #1531371: Use after free in cli_cmd_volume_create_cbk
  • +
  • #1533023: [Disperse] Implement open fd heal for disperse volume
  • +
  • #1534842: entries not getting cleared post healing of softlinks (stale entries showing up in heal info)
  • +
  • #1535438: Take full lock on files in 3 way replication
  • +
  • #1536294: Random GlusterFSD process dies during rebalance
  • +
\ No newline at end of file
diff --git a/release-notes/3.5.0/index.html b/release-notes/3.5.0/index.html
new file mode 100644
index 00000000..439a97c9
--- /dev/null
+++ b/release-notes/3.5.0/index.html
@@ -0,0 +1,4830 @@
3.5.0 - Gluster Docs

3.5.0

+ +

Major Changes and Features

+

Documentation about major changes and features is also included in the doc/features/ directory of GlusterFS repository.

+

AFR_CLI_enhancements

+

The AFR reporting via CLI has been improved. This feature provides a coherent mechanism to present heal status, information, and the associated logs. This makes the end user more aware of the healing status and provides statistics.

+

For more information refer here.

+

File_Snapshot

+

This feature provides the ability to take snapshots of files in GlusterFS. File snapshots are supported on files of QCOW2/QED format.

+

This feature adds better integration with OpenStack Cinder, and in general the ability to take snapshots of files (typically VM images).

+

For more information refer here.

+

gfid-access

+

This feature adds a new translator which is designed to provide direct access to files in glusterfs using their GFID.

+

For more information refer here.

+

Prevent NFS restart on Volume change

+

Earlier, any volume change (volume option, volume start, volume stop, volume delete, brick add, etc.) required restarting the NFS server.

+

With this feature, it is no longer required to restart the NFS server, thereby providing better usability with no disruptions to NFS connections.

+

Features/Quota_Scalability

+

This feature provides support for up to 65536 quota configurations per volume.

+

readdir_ahead

+

This feature provides read-ahead support for directories to improve sequential directory read performance.

+

zerofill

+

The zerofill feature allows creation of pre-allocated and zeroed-out files on GlusterFS volumes by offloading the zeroing part to the server and/or storage (storage offloads use SCSI WRITESAME), thereby achieving quick creation of pre-allocated and zeroed-out VM disk images by using server/storage off-loads.

+

For more information refer here.

+

Brick_Failure_Detection

+

This feature attempts to identify storage/file system failures and disable the failed brick without disrupting the rest of the node's operation.

+

This adds a health-checker that periodically checks the status of the filesystem (implies checking of functional storage-hardware).

+

For more information refer here.

+

Changelog based distributed geo-replication

+

New improved geo-replication which makes use of all the nodes in the master volume. Unlike the previous version of geo-replication, where all changes were detected and synced on a single node in the master volume, now each node of the master volume participates in the geo-replication.

+

Change Detection - geo-rep now makes use of the changelog xlator to detect the set of files which need to be synced. The changelog xlator runs per brick and, when enabled, records each fop which modifies files. geo-rep consumes the journals created by this xlator and syncs the files identified as 'changed' to the slave.

+

Distributed nature - Each of the nodes takes the responsibility of syncing the data which is present on that node. In case of a replicated volume, one of them will be 'Active'ly syncing the data, while the other one is 'Passive'.

+

Syncing Method - Apart from using rsync as the syncing method, there is now a tar+ssh syncing method, which can be leveraged by workloads with a large number of small files; see the sketch below.

+
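As a hedged illustration of the syncing methods mentioned above (the master volume, slave host and slave volume names are placeholders, and the config key name is an assumption based on the geo-replication CLI of this era):

    # create and start a geo-replication session
    gluster volume geo-replication <mastervol> <slavehost>::<slavevol> create push-pem
    gluster volume geo-replication <mastervol> <slavehost>::<slavevol> start
    # switch from rsync to tar over ssh for workloads with many small files
    gluster volume geo-replication <mastervol> <slavehost>::<slavevol> config use_tarssh true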

Improved block device translator

+

This feature provides a translator to use logical volumes to store VM images and expose them as files to QEMU/KVM.

+

The volume group is represented as a directory and logical volumes as files.

+

Remove brick CLI Change

+

The remove-brick CLI earlier used to remove the brick forcefully (without data migration) when called without any arguments. This mode of the 'remove-brick' CLI, without any arguments, has been deprecated; the staged workflow is sketched below.

+
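For reference, a minimal sketch of the staged remove-brick workflow that remains supported (volume and brick paths are placeholders):

    # start migrating data off the brick being removed
    gluster volume remove-brick <volname> <host>:<brickpath> start
    # poll until the migration completes
    gluster volume remove-brick <volname> <host>:<brickpath> status
    # only then commit the removal
    gluster volume remove-brick <volname> <host>:<brickpath> commit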

Experimental Features

+

The following features are experimental with this release:

+
  • RDMA-connection manager (RDMA-CM)
  • support for NUFA translator
  • disk-encryption
  • On-Wire Compression + Decompression [CDC]
+

Minor Improvements:

+
  • Old graphs are cleaned up by FUSE clients
  • New command "volume status tasks" introduced to track asynchronous tasks like rebalance and remove-brick
  • glfs_readdir(), glfs_readdirplus(), glfs_fallocate(), glfs_discard() APIs support added in libgfapi
  • Per client RPC throttling added in rpc server
  • Communication between cli and glusterd happens over unix domain socket
  • Information on connected NFS clients is persistent across NFS restarts
  • Hardlink creation failures with SMB addressed
  • Non-local clients function with nufa volumes
  • Configurable option added to mount.glusterfs to use kernel-readdirp with fuse client
  • AUTH support for exported nfs sub-directories added

Known Issues:

+
  • The following configuration changes are necessary for qemu and samba
    integration with libgfapi to work seamlessly:

    1) gluster volume set <volname> server.allow-insecure on
    2) Edit /etc/glusterfs/glusterd.vol to contain this line:
          option rpc-auth-allow-insecure on

    Post 1), restarting the volume would be necessary.
    Post 2), restarting glusterd would be necessary.

  • RDMA connection manager needs IPoIB for connection establishment. More
    details can be found here.

  • For Block Device translator based volumes the open-behind translator at the
    client side needs to be disabled.

  • libgfapi clients calling glfs_fini before a successful glfs_init will cause
    the client to hang, as reported here. The workaround is NOT to call
    glfs_fini for error cases encountered before a successful glfs_init.
\ No newline at end of file
diff --git a/release-notes/3.5.1/index.html b/release-notes/3.5.1/index.html
new file mode 100644
index 00000000..d960cb3a
--- /dev/null
+++ b/release-notes/3.5.1/index.html
@@ -0,0 +1,4635 @@
3.5.1 - Gluster Docs

3.5.1

+ +

Release Notes for GlusterFS 3.5.1

+

This is mostly a bugfix release. The Release Notes for 3.5.0 contain a listing of all the new features that were added.

+

There are two notable changes that are not only bug fixes or documentation additions:

+
  1. A new volume option, server.manage-gids, has been added. This option should
     be used when users of a volume are in more than approximately 93 groups
     (Bug 1096425); see the sketch after this list.
  2. The Duplicate Request Cache for NFS has now been disabled by default. This
     may reduce performance for certain workloads, but improves the overall
     stability and memory footprint for most users.
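As a hedged illustration of the two changes above (the volume name is a placeholder; nfs.drc is the option controlling the Duplicate Request Cache):

    # resolve secondary groups on the servers for users in many groups
    gluster volume set <volname> server.manage-gids on
    # a stop/start is required for brick processes to pick this up (see Known Issues)
    gluster volume stop <volname>
    gluster volume start <volname>

    # re-enable the NFS Duplicate Request Cache if the previous behaviour is preferred
    gluster volume set <volname> nfs.drc on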

Bugs Fixed:

+
    +
  • 765202: lgetxattr called with invalid keys on the bricks
  • +
  • 833586: inodelk hang from marker_rename_release_newp_lock
  • +
  • 859581: self-heal process can sometimes create directories instead of symlinks for the root gfid file in .glusterfs
  • +
  • 986429: Backupvolfile server option should work internal to GlusterFS framework
  • +
  • 1039544: [FEAT] "gluster volume heal info" should list the entries that actually required to be healed.
  • +
  • 1046624: Unable to heal symbolic Links
  • +
  • 1046853: AFR : For every file self-heal there are warning messages reported in glustershd.log file
  • +
  • 1063190: Volume was not accessible after server side quorum was met
  • +
  • 1064096: The old Python Translator code (not Glupy) should be removed
  • +
  • 1066996: Using sanlock on a gluster mount with replica 3 (quorum-type auto) leads to a split-brain
  • +
  • 1071191: [3.5.1] Sporadic SIGBUS with mmap() on a sparse file created with open(), seek(), write()
  • +
  • 1078061: Need ability to heal mismatching user extended attributes without any changelogs
  • +
  • 1078365: New xlators are linked as versioned .so files, creating .so.0.0.0
  • +
  • 1086743: Add documentation for the Feature: RDMA-connection manager (RDMA-CM)
  • +
  • 1086748: Add documentation for the Feature: AFR CLI enhancements
  • +
  • 1086749: Add documentation for the Feature: Exposing Volume Capabilities
  • +
  • 1086750: Add documentation for the Feature: File Snapshots in GlusterFS
  • +
  • 1086751: Add documentation for the Feature: gfid-access
  • +
  • 1086752: Add documentation for the Feature: On-Wire Compression/Decompression
  • +
  • 1086754: Add documentation for the Feature: Quota Scalability
  • +
  • 1086755: Add documentation for the Feature: readdir-ahead
  • +
  • 1086756: Add documentation for the Feature: zerofill API for GlusterFS
  • +
  • 1086758: Add documentation for the Feature: Changelog based parallel geo-replication
  • +
  • 1086760: Add documentation for the Feature: Write Once Read Many (WORM) volume
  • +
  • 1086762: Add documentation for the Feature: BD Xlator - Block Device translator
  • +
  • 1086766: Add documentation for the Feature: Libgfapi
  • +
  • 1086774: Add documentation for the Feature: Access Control List - Version 3 support for Gluster NFS
  • +
  • 1086781: Add documentation for the Feature: Eager locking
  • +
  • 1086782: Add documentation for the Feature: glusterfs and oVirt integration
  • +
  • 1086783: Add documentation for the Feature: qemu 1.3 - libgfapi integration
  • +
  • 1088848: Spelling errors in rpc/rpc-transport/rdma/src/rdma.c
  • +
  • 1089054: gf-error-codes.h is missing from source tarball
  • +
  • 1089470: SMB: Crash on brick process during compile kernel.
  • +
  • 1089934: list dir with more than N files results in Input/output error
  • +
  • 1091340: Doc: Add glfs_fini known issue to release notes 3.5
  • +
  • 1091392: glusterfs.spec.in: minor/nit changes to sync with Fedora spec
  • +
  • 1095256: Excessive logging from self-heal daemon, and bricks
  • +
  • 1095595: Stick to IANA standard while allocating brick ports
  • +
  • 1095775: Add support in libgfapi to fetch volume info from glusterd.
  • +
  • 1095971: Stopping/Starting a Gluster volume resets ownership
  • +
  • 1096040: AFR : self-heal-daemon not clearing the change-logs of all the sources after self-heal
  • +
  • 1096425: i/o error when one user tries to access RHS volume over NFS with 100+ GIDs
  • +
  • 1099878: Need support for handle based Ops to fetch/modify extended attributes of a file
  • +
  • 1101647: gluster volume heal volname statistics heal-count not giving desired output.
  • +
  • 1102306: license: xlators/features/glupy dual license GPLv2 and LGPLv3+
  • +
  • 1103413: Failure in gf_log_init reopening stderr
  • +
  • 1104592: heal info may give Success instead of transport end point not connected when a brick is down.
  • +
  • 1104915: glusterfsd crashes while doing stress tests
  • +
  • 1104919: Fix memory leaks in gfid-access xlator.
  • +
  • 1104959: Dist-geo-rep : some of the files not accessible on slave after the geo-rep sync from master to slave.
  • +
  • 1105188: Two instances each, of brick processes, glusterfs-nfs and quotad seen after glusterd restart
  • +
  • 1105524: Disable nfs.drc by default
  • +
  • 1107937: quota-anon-fd-nfs.t fails spuriously
  • +
  • 1109832: I/O fails for for glusterfs 3.4 AFR clients accessing servers upgraded to glusterfs 3.5
  • +
  • 1110777: glusterfsd OOM - using all memory when quota is enabled
  • +
+

Known Issues:

  • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly (the complete command sequence is summarized after this list):

    1. gluster volume set server.allow-insecure on

    2. Restarting the volume is necessary:

       gluster volume stop <volname>
       gluster volume start <volname>

    3. Edit /etc/glusterfs/glusterd.vol to contain this line:

       option rpc-auth-allow-insecure on

    4. Restarting glusterd is necessary:

       service glusterd restart

More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

  • For Block Device translator based volumes the open-behind translator at the client side needs to be disabled.

  • libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang, as reported by QEMU developers. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init. Follow Bug 1091335 to get informed when a release is made available that contains a final fix.

  • After enabling server.manage-gids, the volume needs to be stopped and started again to have the option enabled in the brick processes:

    gluster volume stop <volname>
    gluster volume start <volname>
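Taken together, the steps above amount to the following command sequence. This is a minimal sketch rather than part of the original notes: <volname> is a placeholder, the allow-insecure option is assumed to be set per volume (as in the later release notes), and the glusterd.vol edit plus glusterd restart must be done on every server:

    gluster volume set <volname> server.allow-insecure on
    gluster volume stop <volname>
    gluster volume start <volname>
    # append "option rpc-auth-allow-insecure on" to /etc/glusterfs/glusterd.vol, then:
    service glusterd restart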

3.5.2

+ +

Release Notes for GlusterFS 3.5.2

+

This is mostly a bugfix release. The Release Notes for 3.5.0 and 3.5.1 contain a listing of all the new features that were added and bugs fixed.

+

Bugs Fixed:

+
    +
  • 1096020: NFS server crashes in _socket_read_vectored_request
  • +
  • 1100050: Can't write to quota enable folder
  • +
  • 1103050: nfs: reset command does not alter the result for nfs options earlier set
  • +
  • 1105891: features/gfid-access: stat on .gfid virtual directory return EINVAL
  • +
  • 1111454: creating symlinks generates errors on stripe volume
  • +
  • 1112111: Self-heal errors with "afr crawl failed for child 0 with ret -1" while performing rolling upgrade.
  • +
  • 1112348: [AFR] I/O fails when one of the replica nodes go down
  • +
  • 1112659: Fix inode leaks in gfid-access xlator
  • +
  • 1112980: NFS subdir authentication doesn't correctly handle multi-(homed,protocol,etc) network addresses
  • +
  • 1113007: nfs-utils should be installed as dependency while installing glusterfs-server
  • +
  • 1113403: Excessive logging in quotad.log of the kind 'null client'
  • +
  • 1113749: client_t clienttable cliententries are never expanded when all entries are used
  • +
  • 1113894: AFR : self-heal of few files not happening when a AWS EC2 Instance is back online after a restart
  • +
  • 1113959: Spec %post server does not wait for the old glusterd to exit
  • +
  • 1114501: Dist-geo-rep : deletion of files on master, geo-rep fails to propagate to slaves.
  • +
  • 1115369: Allow the usage of the wildcard character '*' to the options "nfs.rpc-auth-allow" and "nfs.rpc-auth-reject"
  • +
  • 1115950: glfsheal: Improve the way in which we check the presence of replica volumes
  • +
  • 1116672: Resource cleanup doesn't happen for clients on servers after disconnect
  • +
  • 1116997: mounting a volume over NFS (TCP) with MOUNT over UDP fails
  • +
  • 1117241: backport 'gluster volume status --xml' issues
  • +
  • 1120151: Glustershd memory usage too high
  • +
  • 1124728: SMB: CIFS mount fails with the latest glusterfs rpm's
  • +
+

Known Issues:

  • The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

    1. gluster volume set server.allow-insecure on

    2. Restarting the volume is necessary:

       gluster volume stop <volname>
       gluster volume start <volname>

    3. Edit /etc/glusterfs/glusterd.vol to contain this line:

       option rpc-auth-allow-insecure on

    4. Restarting glusterd is necessary:

       service glusterd restart

More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

  • For Block Device translator based volumes the open-behind translator at the client side needs to be disabled:

    gluster volume set <volname> performance.open-behind disabled

  • libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang, as reported here. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init.

  • If the /var/run/gluster directory does not exist, enabling quota will likely fail (Bug 1117888).

3.5.3

+ +

Release Notes for GlusterFS 3.5.3

+

This is a bugfix release. The Release Notes for 3.5.0, 3.5.1 and 3.5.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.5 stable release.

+

Bugs Fixed:

+
    +
  • 1081016: glusterd needs xfsprogs and e2fsprogs packages
  • +
  • 1100204: brick failure detection does not work for ext4 filesystems
  • +
  • 1126801: glusterfs logrotate config file pollutes global config
  • +
  • 1129527: DHT :- data loss - file is missing on renaming same file from multiple client at same time
  • +
  • 1129541: [DHT:REBALANCE]: Rebalance failures are seen with error message " remote operation failed: File exists"
  • +
  • 1132391: NFS interoperability problem: stripe-xlator removes EOF at end of READDIR
  • +
  • 1133949: Minor typo in afr logging
  • +
  • 1136221: The memories are exhausted quickly when handle the message which has multi fragments in a single record
  • +
  • 1136835: crash on fsync
  • +
  • 1138922: DHT + rebalance : rebalance process crashed + data loss + few Directories are present on sub-volumes but not visible on mount point + lookup is not healing directories
  • +
  • 1139103: DHT + Snapshot :- If snapshot is taken when Directory is created only on hashed sub-vol; On restoring that snapshot Directory is not listed on mount point and lookup on parent is not healing
  • +
  • 1139170: DHT :- rm -rf is not removing stale link file and because of that unable to create file having same name as stale link file
  • +
  • 1139245: vdsm invoked oom-killer during rebalance and Killed process 4305, UID 0, (glusterfs nfs process)
  • +
  • 1140338: rebalance is not resulting in the hash layout changes being available to nfs client
  • +
  • 1140348: Renaming file while rebalance is in progress causes data loss
  • +
  • 1140549: DHT: Rebalance process crash after add-brick and `rebalance start' operation
  • +
  • 1140556: Core: client crash while doing rename operations on the mount
  • +
  • 1141558: AFR : "gluster volume heal info" prints some random characters
  • +
  • 1141733: data loss when rebalance + renames are in progress and bricks from replica pairs goes down and comes back
  • +
  • 1142052: Very high memory usage during rebalance
  • +
  • 1142614: files with open fd's getting into split-brain when bricks goes offline and comes back online
  • +
  • 1144315: core: all brick processes crash when quota is enabled
  • +
  • 1145000: Spec %post server does not wait for the old glusterd to exit
  • +
  • 1147156: AFR client segmentation fault in afr_priv_destroy
  • +
  • 1147243: nfs: volume set help says the rmtab file is in "/var/lib/glusterd/rmtab"
  • +
  • 1149857: Option transport.socket.bind-address ignored
  • +
  • 1153626: Sizeof bug for allocation of memory in afr_lookup
  • +
  • 1153629: AFR : excessive logging of "Non blocking entrylks failed" in glfsheal log file.
  • +
  • 1153900: Enabling Quota on existing data won't create pgfid xattrs
  • +
  • 1153904: self heal info logs are filled with messages reporting ENOENT while self-heal is going on
  • +
  • 1155073: Excessive logging in the self-heal daemon after a replace-brick
  • +
  • 1157661: GlusterFS allows insecure SSL modes
  • +
+

Known Issues:

  • The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

    1. gluster volume set server.allow-insecure on

    2. Restarting the volume is necessary:

       gluster volume stop <volname>
       gluster volume start <volname>

    3. Edit /etc/glusterfs/glusterd.vol to contain this line:

       option rpc-auth-allow-insecure on

    4. Restarting glusterd is necessary:

       service glusterd restart

More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

  • For Block Device translator based volumes the open-behind translator at the client side needs to be disabled:

    gluster volume set <volname> performance.open-behind disabled

  • libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang, as reported here. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init. This is being tracked in Bug 1134050 for glusterfs-3.5 and Bug 1093594 for mainline.

  • If the /var/run/gluster directory does not exist, enabling quota will likely fail (Bug 1117888).

3.5.4

+ +

Release Notes for GlusterFS 3.5.4

+

This is a bugfix release. The Release Notes for 3.5.0, 3.5.1, 3.5.2 and 3.5.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.5 stable release.

+

Bugs Fixed:

+
    +
  • 1092037: Issues reported by Cppcheck static analysis tool
  • +
  • 1101138: meta-data split-brain prevents entry/data self-heal of dir/file respectively
  • +
  • 1115197: Directory quota does not apply on it's sub-directories
  • +
  • 1159968: glusterfs.spec.in: deprecate *.logrotate files in dist-git in favor of the upstream logrotate files
  • +
  • 1160711: libgfapi: use versioned symbols in libgfapi.so for compatibility
  • +
  • 1161102: self heal info logs are filled up with messages reporting split-brain
  • +
  • 1162150: AFR gives EROFS when fop fails on all subvolumes when client-quorum is enabled
  • +
  • 1162226: bulk remove xattr should not fail if removexattr fails with ENOATTR/ENODATA
  • +
  • 1162230: quota xattrs are exposed in lookup and getxattr
  • +
  • 1162767: DHT: Rebalance- Rebalance process crash after remove-brick
  • +
  • 1166275: Directory fd leaks in index translator
  • +
  • 1168173: Regression tests fail in quota-anon-fs-nfs.t
  • +
  • 1173515: [HC] - mount.glusterfs fails to check return of mount command.
  • +
  • 1174250: Glusterfs outputs a lot of warnings and errors when quota is enabled
  • +
  • 1177339: entry self-heal in 3.5 and 3.6 are not compatible
  • +
  • 1177928: Directories not visible anymore after add-brick, new brick dirs not part of old bricks
  • +
  • 1184528: Some newly created folders have root ownership although created by unprivileged user
  • +
  • 1186121: tar on a gluster directory gives message "file changed as we read it" even though no updates to file in progress
  • +
  • 1190633: self-heal-algorithm with option "full" doesn't heal sparse files correctly
  • +
  • 1191006: Building argp-standalone breaks nightly builds on Fedora Rawhide
  • +
  • 1192832: log files get flooded when removexattr() can't find a specified key or value
  • +
  • 1200764: [AFR] Core dump and crash observed during disk replacement case
  • +
  • 1202675: Perf: readdirp in replicated volumes causes performance degrade
  • +
  • 1211841: glusterfs-api.pc versioning breaks QEMU
  • +
  • 1222150: readdirp return 64bits inodes even if enable-ino32 is set
  • +
+

Known Issues:

  • The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

    1. gluster volume set server.allow-insecure on

    2. Restarting the volume is necessary:

       gluster volume stop <volname>
       gluster volume start <volname>

    3. Edit /etc/glusterfs/glusterd.vol to contain this line:

       option rpc-auth-allow-insecure on

    4. Restarting glusterd is necessary:

       service glusterd restart

More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

  • For Block Device translator based volumes the open-behind translator at the client side needs to be disabled:

    gluster volume set <volname> performance.open-behind disabled

  • libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang, as reported here. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init. This is being tracked in Bug 1134050 for glusterfs-3.5 and Bug 1093594 for mainline.

  • If the /var/run/gluster directory does not exist, enabling quota will likely fail (Bug 1117888).

3.6.0

+ +

Major Changes and Features

+

Documentation about major changes and features is also included in the doc/features/ directory of GlusterFS repository.

+

Volume Snapshot

+

Volume snapshot provides a point-in-time copy of a GlusterFS volume. The snapshot is an online operation and hence filesystem data continues to be available for the clients while the snapshot is being taken.

+

For more information refer here.

+

User Serviceable Snapshots

+

User Serviceable Snapshots provides the ability for users to access snapshots of GlusterFS volumes without administrative intervention.

+

For more information refer here.

+

Erasure Coding

+

The new disperse translator provides the ability to perform erasure coding across nodes.

+

For more information refer here.

+

Granular locking support for management operations

+

Glusterd now holds a volume lock to support parallel management operations on different volumes.

+

Journaling enhancements (changelog xlator)

+

Introduction of a history API to consume journal records which were persisted by the changelog translator. With this API, it is no longer required to perform an expensive filesystem crawl to identify changes. Geo-replication makes use of this (on [re]start), thereby optimizing remote replication for purges, hardlinks, etc.

+

Better Support for bricks with heterogeneous sizes

+

Prior to 3.6, bricks with heterogeneous sizes were treated as equal regardless of size, and would have been assigned an equal share of files. From 3.6, assignment of files to bricks will take into account the sizes of the bricks.

+

Improved SSL support

+

GlusterFS 3.6 provides better support to enable SSL on both management and data connections. This feature is currently being consumed by the GlusterFS native driver in OpenStack Manila.

+

Better peer identification

+

GlusterFS 3.6 improves peer identification. GlusterD will no longer complain when a mixture of FQDNs, shortnames and IP addresses are used. Changes done for this improvement have also laid down a base for improving multi network support in GlusterFS.

+

Meta translator

+

Meta translator provides a virtual interface for viewing internal state of translators.

+

Improved synchronous replication support (AFRv2)

+

The replication translator (AFR) in GlusterFS 3.6 has undergone a complete rewrite (http://review.gluster.org/#/c/6010/) and is referred to as AFRv2.

+

From a user point of view, there is no change in the replication behaviour but there are some caveats to be noted from an admin point of view:

+
    +
  • +

    Lookups do not trigger meta-data and data self-heals anymore. They only trigger entry-self-heals. Data and meta-data are healed by the self-heal daemon only.

    +
  • +
  • +

    Bricks in a replica set do not mark any pending change log extended attributes for itself during pre or post op. They only mark it for other bricks in the replica set.

    +
  • +
+

For example: In a replica 2 volume, trusted.afr.<volname>-client-0 for brick-0 and trusted.afr.<volname>-client-1 for brick-1 will always be 0x000000000000000000000000.

+
    +
  • If the post-op changelog update does not complete successfully on a brick, a trusted.afr.dirty extended attribute is set on that brick.
  • +
+

Barrier translator

+

The barrier translator allows file operations to be temporarily 'paused' on GlusterFS bricks, which is needed for performing consistent snapshots of a GlusterFS volume.

+

For more information, see here.

+

Remove brick moves data by default

+

Prior to 3.6, volume remove-brick <volname> CLI would remove the brick from the volume without performing any data migration. Now the default behavior has been changed to perform data migration when this command is issued. Removing a brick without data migration can now be performed through volume remove-brick <volname> force interface.
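As an illustration of the two behaviours (a sketch, not taken from the release notes; volume and brick names are placeholders):

    # remove a brick with data migration (the new default)
    gluster volume remove-brick <volname> <brick> start
    gluster volume remove-brick <volname> <brick> status
    gluster volume remove-brick <volname> <brick> commit

    # remove a brick without migrating its data
    gluster volume remove-brick <volname> <brick> force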

+

Experimental Features

+

The following features are experimental with this release:

+
    +
  • support for rdma volumes.
  • +
  • support for NUFA translator.
  • +
  • disk-encryption
  • +
  • On-Wire Compression + Decompression [CDC]
  • +
+

Porting Status

+
    +
  • +

    NetBSD and FreeBSD support is experimental, but regressions tests suggest that it is close to be fully supported. Please make sure you use latest NetBSD code from -current or netbsd-7 branches.

    +
  • +
  • +

    OSX support is in an alpha state. More testing will help in maturing this support.

    +
  • +
+

Minor Improvements:

+
    +
  • +

    Introduction of server.anonuid and server.anongid options for root squashing

    +
  • +
  • +

    Root squashing doesn't happen for clients in trusted storage pool

    +
  • +
  • +

    Memory accounting of glusterfs processes has been enabled by default

    +
  • +
  • +

    The Gluster/NFS server now has support for setting access permissions on volumes with wildcard IP-addresses and IP-address/subnet (CIDR notation). More details and examples are in the commit message.

    +
  • +
  • +

    More preparation for better integration with the nfs-ganesha user-space NFS-server. The changes are mostly related to the handle-based functions in libgfapi.so.

    +
  • +
  • +

    A new logging framework that can suppress repetitive log messages and provide a dictionary of messages has been added. Few translators have now been integrated with the framework. More translators are expected to integrate with this framework in upcoming minor & major releases.

    +
  • +
+

Known Issues:

  • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly:

    1. gluster volume set <volname> server.allow-insecure on

    2. Edit /etc/glusterfs/glusterd.vol to contain this line:

       option rpc-auth-allow-insecure on

Post 1, restarting the volume would be necessary:

    # gluster volume stop <volname>
    # gluster volume start <volname>

Post 2, restarting glusterd would be necessary:

    # service glusterd restart

  • For Block Device translator based volumes the open-behind translator at the client side needs to be disabled.

  • Renames happening on a file that is being migrated during rebalance will fail.

  • Dispersed volumes do not work with the self-heal daemon. Self-healing is only activated when a damaged file or directory is accessed. Forcing a full self-heal, or replacing a brick, requires traversing the file system from a mount point. This is the recommended command to do so:

    find <mount> -d -exec getfattr -h -n test {} \;

  • Quota on dispersed volumes is not correctly computed, allowing more data to be stored than specified. A workaround to this problem is to define a smaller quota based on this formula:

    Q' = Q / (N - R)

Where Q is the desired quota value, Q' is the new quota value to use, N is the number of bricks per disperse set, and R is the redundancy.
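For example (an illustrative calculation with hypothetical numbers, not taken from the release notes): to enforce an effective quota of 100GB on a disperse set of 6 bricks with redundancy 2 (Q = 100GB, N = 6, R = 2), configure

    Q' = 100GB / (6 - 2) = 25GB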

+

Upgrading to 3.6.X

+

Before upgrading to 3.6 version of gluster from 3.4.X or 3.5.x, please take a look at following link: +Upgrade Gluster to 3.6


3.6.3

+ +

Release Notes for GlusterFS 3.6.3

+

This is a bugfix release. The Release Notes for 3.6.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.6 stable release.

+

Bugs Fixed:

+
    +
  • 1187526: Disperse volume mounted through NFS doesn't list any files/directories
  • +
  • 1188471: When the volume is in stopped state/all the bricks are down mount of the volume hangs
  • +
  • 1201484: glusterfs-3.6.2 fails to build on Ubuntu Precise: 'RDMA_OPTION_ID_REUSEADDR' undeclared
  • +
  • 1202212: Performance enhancement for RDMA
  • +
  • 1189023: Directories not visible anymore after add-brick, new brick dirs not part of old bricks
  • +
  • 1202673: Perf: readdirp in replicated volumes causes performance degrade
  • +
  • 1203081: Entries in indices/xattrop directory not removed appropriately
  • +
  • 1203648: Quota: Build ancestry in the lookup
  • +
  • 1199936: readv on /var/run/6b8f1f2526c6af8a87f1bb611ae5a86f.socket failed when NFS is disabled
  • +
  • 1200297: cli crashes when listing quota limits with xml output
  • +
  • 1201622: Convert quota size from n-to-h order before using it
  • +
  • 1194141: AFR : failure in self-heald.t
  • +
  • 1201624: Spurious failure of tests/bugs/quota/bug-1038598.t
  • +
  • 1194306: Do not count files which did not need index heal in the first place as successfully healed
  • +
  • 1200258: Quota: features.quota-deem-statfs is "on" even after disabling quota.
  • +
  • 1165938: Fix regression test spurious failures
  • +
  • 1197598: NFS logs are filled with system.posix_acl_access messages
  • +
  • 1199577: mount.glusterfs uses /dev/stderr and fails if the device does not exist
  • +
  • 1197598: NFS logs are filled with system.posix_acl_access messages
  • +
  • 1188066: logging improvements in marker translator
  • +
  • 1191537: With afrv2 + ext4, lookups on directories with large offsets could result in duplicate/missing entries
  • +
  • 1165129: libgfapi: use versioned symbols in libgfapi.so for compatibility
  • +
  • 1179136: glusterd: Gluster rebalance status returns failure
  • +
  • 1176756: glusterd: remote locking failure when multiple synctask transactions are run
  • +
  • 1188064: log files get flooded when removexattr() can't find a specified key or value
  • +
  • 1165938: Fix regression test spurious failures
  • +
  • 1192522: index heal doesn't continue crawl on self-heal failure
  • +
  • 1193970: Fix spurious ssl-authz.t regression failure (backport)
  • +
  • 1138897: NetBSD port
  • +
  • 1184527: Some newly created folders have root ownership although created by unprivileged user
  • +
  • 1181977: gluster vol clear-locks vol-name path kind all inode return IO error in a disperse volume
  • +
  • 1159471: rename operation leads to core dump
  • +
  • 1173528: Change in volume heal info command output
  • +
  • 1186119: tar on a gluster directory gives message "file changed as we read it" even though no updates to file in progress
  • +
  • 1183716: Force replace-brick lead to the persistent write(use dd) return Input/output error
  • +
  • 1138897: NetBSD port
  • +
  • 1178590: Enable quota(default) leads to heal directory's xattr failed.
  • +
  • 1182490: Internal ec xattrs are allowed to be modified
  • +
  • 1187547: self-heal-algorithm with option "full" doesn't heal sparse files correctly
  • +
  • 1174170: Glusterfs outputs a lot of warnings and errors when quota is enabled
  • +
  • 1212684: - GlusterD segfaults when started with management SSL
  • +
+

Known Issues:

  • The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

    1. gluster volume set server.allow-insecure on

    2. Restarting the volume is necessary:

       gluster volume stop <volname>
       gluster volume start <volname>

    3. Edit /etc/glusterfs/glusterd.vol to contain this line:

       option rpc-auth-allow-insecure on

    4. Restarting glusterd is necessary:

       service glusterd restart

More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

  • For Block Device translator based volumes the open-behind translator at the client side needs to be disabled:

    gluster volume set <volname> performance.open-behind disable

3.7.0

+ +

Release Notes for GlusterFS 3.7.0

+

Major Changes and Features

+

Documentation about major changes and features is included in the doc/features/ directory of GlusterFS repository.

+

Geo Replication

+

Many improvements have gone in the geo replication. A detailed documentation about all the improvements can be found here

+

Bitrot Detection

+

Bitrot detection is a technique used to identify an “insidious” type of disk error where data is silently corrupted with no indication from the disk to the +storage software layer that an error has occurred. When bitrot detection is enabled on a volume, gluster performs signing of all files/objects in the volume and scrubs data periodically for signature verification. All anomalies observed will be noted in log files.

+

For more information, refer here.

+

Multi threaded epoll for performance improvements

+

Gluster 3.7 introduces multiple threads to dequeue and process more requests from epoll queues. This improves performance by processing more I/O requests. Workloads that involve read/write operations on a lot of small files can benefit from this enhancement.

+

For more information refer here.

+

Volume Tiering [Experimental]

+

Policy based tiering for placement of files. This feature will serve as a foundational piece for building support for data classification.

+

For more information refer here.

+

Volume Tiering is marked as an experimental feature for this release. It is expected to be fully supported in a 3.7.x minor release.

+

Trashcan

+

This feature will enable administrators to temporarily store deleted files from Gluster volumes for a specified time period.

+

For more information refer here.

+

Efficient Object Count and Inode Quota Support

+

This improvement enables an easy mechanism to retrieve the number of objects per directory or volume. Count of objects/files within a directory hierarchy is stored as an extended attribute of a directory. The extended attribute can be queried to retrieve the count.

+

For more information refer here.

+

This feature has been utilized to add support for inode quotas.

+

For more details about inode quotas, refer here.

+

Pro-active Self healing for Erasure Coding

+

Gluster 3.7 adds pro-active self healing support for erasure coded volumes.

+

Exports and Netgroups Authentication for NFS

+

This feature adds Linux-style exports & netgroups authentication to the native NFS server. This enables administrators to restrict access to specific clients & netgroups for volume/sub-directory NFSv3 exports.

+

For more information refer here.

+

GlusterFind

+

GlusterFind is a new tool that provides a mechanism to monitor data events within a volume. Detection of events like modified files is made easier without having to traverse the entire volume.

+

For more information refer here.

+

Rebalance Performance Improvements

+

Rebalance and remove brick operations in Gluster get a performance boost by speeding up identification of files needing movement and a multi-threaded mechanism to move all such files.

+

For more information refer here.

+

NFSv4 and pNFS support

+

Gluster 3.7 supports export of volumes through NFSv4, NFSv4.1 and pNFS. This support is enabled via NFS Ganesha. Infrastructure changes done in Gluster 3.7 to support this feature include:

+
    +
  • Addition of upcall infrastructure for cache invalidation.
  • +
  • Support for lease locks and delegations.
  • +
  • Support for enabling Ganesha through Gluster CLI.
  • +
  • Corosync and pacemaker based implementation providing resource monitoring and failover to accomplish NFS HA.
  • +
+

For more information refer the below links:

+ +

pNFS support for Gluster volumes and NFSv4 delegations are in beta for this release. Infrastructure changes to support Lease locks and NFSv4 delegations are targeted for a 3.7.x minor release.

+

Snapshot Scheduling

+

With this enhancement, administrators can schedule volume snapshots.

+

For more information, see here.

+

Snapshot Cloning

+

Volume snapshots can now be cloned to create a new writeable volume.

+

For more information, see here.

+

Sharding [Experimental]

+

Sharding addresses the problem of fragmentation of space within a volume. This feature adds support for files that are larger than the size of an individual brick. Sharding works by chunking files into blobs of a configurable size.

+

For more information, see here.

+

Sharding is an experimental feature for this release. It is expected to be fully supported in a 3.7.x minor release.

+

RCU in glusterd

+

Thread synchronization and critical section access has been improved by introducing userspace RCU in glusterd

+

Arbiter Volumes

+

Arbiter volumes are 3 way replicated volumes where the 3rd brick of the replica is automatically configured as an arbiter. The 3rd brick contains only metadata which provides network partition tolerance and prevents split-brains from happening.

+

For more information, see here.

+

Better split-brain resolution

+

split-brain resolutions can now be also driven by users without administrative intervention.

+

For more information, see the 'Resolution of split-brain from the mount point' section here.

+

Minor Improvements

+
    +
  • Message ID based logging has been added for several translators.
  • +
  • Quorum support for reads.
  • +
  • Snapshot names contain timestamps by default. Subsequent access to the snapshots should be done by the name listed in gluster snapshot list.
  • Support for gluster volume get <volname> added (see the example after this list).
  • +
  • libgfapi has added handle based functions to get/set POSIX ACLs based on common libacl structures.
  • +
+
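A brief illustration of the new command (a sketch; the volume name and the queried option are placeholders chosen for the example):

    # show the effective value of a single volume option
    gluster volume get <volname> cluster.server-quorum-type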

Known Issues

  • Enabling Bitrot on volumes with more than 2 bricks on a node is known to cause problems.
  • Addition of bricks dynamically to cold or hot tiers in a tiered volume is not supported.
  • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly:

    1. # gluster volume set <volname> server.allow-insecure on
    2. Edit /etc/glusterfs/glusterd.vol to contain this line: option rpc-auth-allow-insecure on

Post 1, restarting the volume would be necessary:

    # gluster volume stop <volname>
    # gluster volume start <volname>

Post 2, restarting glusterd would be necessary:

    # service glusterd restart

or

    # systemctl restart glusterd

+

Upgrading to 3.7.0

+

Instructions for upgrading from previous versions of GlusterFS are maintained on this page.


3.7.1

+ +

Release Notes for GlusterFS 3.7.1

+

This is a bugfix release. The Release Notes for 3.7.0 contain a listing of all the new features that were added.

+

Note: Enabling Bitrot on volumes with more than 2 bricks on a node works with this release.

+

Bugs Fixed

+
    +
  • 1212676: NetBSD port
  • +
  • 1218863: `ls' on a directory which has files with mismatching gfid's does not list anything
  • +
  • 1219782: Regression failures in tests/bugs/snapshot/bug-1112559.t
  • +
  • 1221000: detach-tier status emulates like detach-tier stop
  • +
  • 1221470: dHT rebalance: Dict_copy log messages when running rebalance on a dist-rep volume
  • +
  • 1221476: Data Tiering:rebalance fails on a tiered volume
  • +
  • 1221477: The tiering feature requires counters.
  • +
  • 1221503: DHT Rebalance : Misleading log messages for linkfiles
  • +
  • 1221507: NFS-Ganesha: ACL should not be enabled by default
  • +
  • 1221534: rebalance failed after attaching the tier to the volume.
  • +
  • 1221967: Do not allow detach-tier commands on a non tiered volume
  • +
  • 1221969: tiering: use sperate log/socket/pid file for tiering
  • +
  • 1222198: Fix nfs/mount3.c build warnings reported in Koji
  • +
  • 1222750: non-root geo-replication session goes to faulty state, when the session is started
  • +
  • 1222869: [SELinux] [BVT]: Selinux throws AVC errors while running DHT automation on Rhel6.6
  • +
  • 1223215: gluster volume status fails with locking failed error message
  • +
  • 1223286: [geo-rep]: worker died with "ESTALE" when performed rm -rf on a directory from mount of master volume
  • +
  • 1223644: [geo-rep]: With tarssh the file is created at slave but it doesnt get sync
  • +
  • 1224100: [geo-rep]: Even after successful sync, the DATA counter did not reset to 0
  • +
  • 1224241: gfapi: zero size issue in glfs_h_acl_set()
  • +
  • 1224292: peers connected in the middle of a transaction are participating in the transaction
  • +
  • 1224647: [RFE] Provide hourly scrubbing option
  • +
  • 1224650: SIGNING FAILURE Error messages are poping up in the bitd log
  • +
  • 1224894: Quota: spurious failures with quota testcases
  • +
  • 1225077: Fix regression test spurious failures
  • +
  • 1225279: Different client can not execute "for((i=0;i<1000;i++));do ls -al;done" in a same directory at the sametime
  • +
  • 1225318: glusterd could crash in remove-brick-status when local remove-brick process has just completed
  • +
  • 1225320: ls command failed with features.read-only on while mounting ec volume.
  • +
  • 1225331: [geo-rep] stop-all-gluster-processes.sh fails to stop all gluster processes
  • +
  • 1225543: [geo-rep]: snapshot creation timesout even if geo-replication is in pause/stop/delete state
  • +
  • 1225552: [Backup]: Unable to create a glusterfind session
  • +
  • 1225709: [RFE] Move signing trigger mechanism to [f]setxattr()
  • +
  • 1225743: [AFR-V2] - afr_final_errno() should treat op_ret > 0 also as success
  • +
  • 1225796: Spurious failure in tests/bugs/disperse/bug-1161621.t
  • +
  • 1225919: Log EEXIST errors in DEBUG level in fops MKNOD and MKDIR
  • +
  • 1225922: Sharding - Skip update of block count and size for directories in readdirp callback
  • +
  • 1226024: cli/tiering:typo errors in tiering
  • +
  • 1226029: I/O's hanging on tiered volumes (NFS)
  • +
  • 1226032: glusterd crashed on the node when tried to detach a tier after restoring data from the snapshot.
  • +
  • 1226117: [RFE] Return proper error codes in case of snapshot failure
  • +
  • 1226120: [Snapshot] Do not run scheduler if ovirt scheduler is running
  • +
  • 1226139: Implement MKNOD fop in bit-rot.
  • +
  • 1226146: BitRot :- bitd is not signing Objects if more than 3 bricks are present on same node
  • +
  • 1226153: Quota: Do not allow set/unset of quota limit in heterogeneous cluster
  • +
  • 1226629: bug-973073.t fails spuriously
  • +
  • 1226853: Volume start fails when glusterfs is source compiled with GCC v5.1.1
  • +
+

Known Issues

+
    +
  • 1227677: Glusterd crashes and cannot start after rebalance
  • +
  • 1227656: Glusted dies when adding new brick to a distributed volume and converting to replicated volume
  • +
  • 1210256: gluster volume info --xml gives back incorrect typrStr in xml
  • +
  • 1212842: tar on a glusterfs mount displays "file changed as we read it" even though the file was not changed
  • +
  • 1220347: Read operation on a file which is in split-brain condition is successful
  • +
  • 1213352: nfs-ganesha: HA issue, the iozone process is not moving ahead, once the nfs-ganesha is killed
  • +
  • 1220270: nfs-ganesha: Rename fails while exectuing Cthon general category test
  • +
  • 1214169: glusterfsd crashed while rebalance and self-heal were in progress
  • +
  • 1221941: glusterfsd: bricks crash while executing ls on nfs-ganesha vers=3
  • +
  • 1225809: [DHT-REBALANCE]-DataLoss: The data appended to a file during its migration will be lost once the migration is done
  • +
  • +

    1225940: DHT: lookup-unhashed feature breaks runtime compatibility with older client versions

    +
  • +
  • +

    Addition of bricks dynamically to cold or hot tiers in a tiered volume is not supported.

    +
  • +
  • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly:

    1. # gluster volume set <volname> server.allow-insecure on
    2. Edit /etc/glusterfs/glusterd.vol to contain this line: option rpc-auth-allow-insecure on

Post 1, restarting the volume would be necessary:

    # gluster volume stop <volname>
    # gluster volume start <volname>

Post 2, restarting glusterd would be necessary:

    # service glusterd restart

or

    # systemctl restart glusterd

Release notes for Gluster 3.9.0

+

This is a major release that includes a huge number of changes. Many improvements contribute to better support of Gluster with containers and running your storage on the same server as your hypervisors. Lots of work has been done to integrate with other projects that are part of the Open Source storage ecosystem.

+

The most notable features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

+

Major changes and features

+

Introducing reset-brick command

+

Notes for users: The reset-brick command provides support to reformat/replace the disk(s) represented by a brick within a volume. This is helpful when a disk goes bad, etc.

+

Start reset process -

+
gluster volume reset-brick VOLNAME HOSTNAME:BRICKPATH start
+
+

The above command kills the respective brick process. Now the brick can be reformatted.

+

To restart the brick after modifying configuration -

+
gluster volume reset-brick VOLNAME HOSTNAME:BRICKPATH HOSTNAME:BRICKPATH commit
+
+

If the brick was killed to replace the brick with same brick path, restart with following command -

+
gluster volume reset-brick VOLNAME HOSTNAME:BRICKPATH HOSTNAME:BRICKPATH commit force
+
+

Limitations:

+
    +
  1. resetting a brick kills a brick process in concern. During this + period the brick will not be available for IO's.
  2. +
  3. Replacing a brick with this command will work only if both the brick paths + are same and belong to same volume.
  4. +
+

Get node level status of a cluster

+

Notes for users: The get-state command provides node level status of a trusted storage pool from the point of view of glusterd in a parseable format. Using the get-state command, external applications can invoke the command on all nodes of the cluster, and parse and collate the data obtained from all these nodes to get a complete picture of the state of the cluster.

+
# gluster get-state <glusterd> [odir <path/to/output/dir] [file <filename>]
+
+

This would dump data points that reflect the local state representation of the cluster as maintained in glusterd (no other daemons are supported as of now) to a file inside the specified output directory. The default output directory and filename are /var/run/gluster and glusterdstate respectively.

+

Following are the sections in the output:

+
    +
  1. Global: UUID and op-version of glusterd
  2. +
  3. Global options: Displays cluster specific options that have been set + explicitly through the volume set command.
  4. +
  5. Peers: Displays the peer node information including its hostname and + connection status
  6. +
  7. Volumes: Displays the list of volumes created on this node along with + detailed information on each volume.
  8. +
  9. Services: Displays the list of the services configured on this node along + with their corresponding statuses.
  10. +
+

Limitations:

+
    +
  1. This only supports glusterd.
  2. +
  3. Does not provide complete cluster state. Data to be collated from all nodes + by external application to get the complete cluster state.
  4. +
+

Multi threaded self-heal for Disperse volumes

+

Notes for users: Users now have the ability to configure multi-threaded self-heal in disperse volumes using the following commands:

+
Option below can be used to control number of parallel heals in SHD
+# gluster volume set <volname> disperse.shd-max-threads [1-64] # default is 1
+Option below can be used to control number of heals that can wait in SHD
+# gluster volume set <volname> disperse.shd-wait-qlength [1-65536] # default is 1024
+
+

Hardware extension acceleration in Disperse volumes

+

Notes for users: If the client hardware provides special instructions that can be used in erasure code calculations, they will be used automatically. At the moment this support is added for the CPU extensions x64, sse and avx.

+

Lock revocation feature

+

Notes for users:

+
    +
  1. Motivation: Prevents cluster instability by mis-behaving clients causing bricks to OOM due to inode/entry lock pile-ups.
  2. +
  3. Adds option to strip clients of entry/inode locks after N seconds
  4. +
  5. Adds option to clear ALL locks should the revocation threshold get hit
  6. +
  7. Adds option to clear all or granted locks should the max-blocked threshold get hit (can be used in combination w/ revocation-clear-all).
  8. +
  9. Adds logging to indicate revocation event & reason
  10. +
  11. Options are:
  12. +
+
# gluster volume set <volname> features.locks-revocation-secs <integer; 0 to disable>
+# gluster volume set <volname> features.locks-revocation-clear-all [on/off]
+# gluster volume set <volname> features.locks-revocation-max-blocked <integer>
+
+

On demand scrubbing for Bitrot Detection:

+

Notes for users: With the 'ondemand' scrub option, you don't need to wait for the scrub-frequency to expire. As the option name itself says, the scrubber can be initiated on demand to detect corruption. If the scrubber is already running, this option is a no-op.

+
# gluster volume bitrot <volume-name> scrub ondemand
+
+

Improvements in Gluster NFS-Ganesha integration

+

Notes for users: With this release, the major change is that all the ganesha related configuration files are stored in the shared storage volume mount point instead of having a separate local copy in the '/etc/ganesha' folder on each node.

+

For new users, before enabling nfs-ganesha:

  1. Create a directory named nfs-ganesha in the shared storage mount point (/var/run/gluster/shared_storage/) (see the sketch after this list).
  2. Create ganesha.conf & ganesha-ha.conf in that directory with the required details filled in.
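The directory creation amounts to the following minimal sketch, assuming the shared storage volume is already mounted at the default path shown above:

    mkdir -p /var/run/gluster/shared_storage/nfs-ganesha
    # then place ganesha.conf and ganesha-ha.conf inside this directory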

    +
  4. +
+

For existing users, before starting the nfs-ganesha service do the following (a shell sketch is given after this list):

  1. Copy all the contents of the /etc/ganesha directory (including the .export_added file) to /var/run/gluster/shared_storage/nfs-ganesha from any of the ganesha nodes.
  2. Create a symlink using /var/run/gluster/shared_storage/nfs-ganesha/ganesha.conf on /etc/ganesha on each node in the ganesha cluster.
  3. Change the path for each export entry in the ganesha.conf file.

     Example: if a volume "test" was exported, then ganesha.conf will have the below export entry -

     %include "/etc/ganesha/exports/export.test.conf"

     Change that line to

     %include "/var/run/gluster/shared_storage/nfs-ganesha/exports/export.test.conf"
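A minimal sketch of steps 1 and 2 (an illustration, not part of the original notes; it assumes the shared storage is mounted at the default path and that the local ganesha.conf may be replaced by a symlink, so back it up first if needed):

    # step 1, on any one ganesha node
    cp -a /etc/ganesha/. /var/run/gluster/shared_storage/nfs-ganesha/
    # step 2, on every node in the ganesha cluster
    ln -sf /var/run/gluster/shared_storage/nfs-ganesha/ganesha.conf /etc/ganesha/ganesha.conf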
+
+

In addition, following changes have been made -

+
    +
  • The entity "HA_VOL_SERVER= " in ganesha-ha.conf is no longer required.
  • +
  • A new resource-agent called portblock (available in >= resource-agents-3.9.5 package) is added to the cluster configuration to speed up the nfs-client connections post IP failover or failback. This may be noticed while looking at the cluster configuration status using the command pcs status.
  • +
+

Availability of python bindings to libgfapi

+

The official python bindings for the GlusterFS libgfapi C library interface are mostly API complete. The complete API reference and documentation can be found at libgfapi-python.rtfd.io

+

The python bindings have been packaged and made available over PyPI.

+

Small file improvements in Gluster with md-cache (Experimental)

+

Notes for users: With this release, the metadata cache on the client side is integrated with the cache-invalidation feature so that clients can cache longer without compromising on consistency. By enabling the metadata cache and cache invalidation feature and extending the cache timeout to 600s, we have seen performance improvements in metadata operations like creates, ls/stat, chmod, rename and delete. The performance improvement is significant in SMB access of a gluster volume, but as a cascading effect improvements are also seen on FUSE/Native access and NFS access.

+

Use the below options in the order mentioned, to enable the features:

+
  # gluster volume set <volname> features.cache-invalidation on
+  # gluster volume set <volname> features.cache-invalidation-timeout 600
+  # gluster volume set <volname> performance.stat-prefetch on
+  # gluster volume set <volname> performance.cache-invalidation on
+  # gluster volume set <volname> performance.cache-samba-metadata on     # Only for SMB access
+  # gluster volume set <volname> performance.md-cache-timeout 600
+
+

Real time Cluster notifications using Events APIs

+

Let us imagine we have a Gluster monitoring system which displays a list of volumes and their state. To show the realtime status, the monitoring app needs to query Gluster at a regular interval to check volume status, new volumes, etc. If the polling interval is 5 seconds, then the monitoring app has to run the gluster volume info command ~17000 times a day!

+

With the Gluster 3.9 release, Gluster provides close-to-realtime notifications and alerts for Gluster cluster state changes. Webhooks can be registered to listen to Events emitted by Gluster. More details about this new feature are available here.

+

http://docs.gluster.org/en/latest/Administrator%20Guide/Events%20APIs
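As a brief illustration of how a consumer hooks into these events (a sketch based on the Events APIs admin guide linked above; the listener URL is a placeholder):

    # register a webhook that will receive JSON events from all nodes
    gluster-eventsapi webhook-add http://monitoring.example.com:9000/listen
    # verify the events daemon status across the cluster
    gluster-eventsapi status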

+

Geo-replication improvements

+

Documentation improvements:

+

Upstream documentation has been rewritten to reflect the latest version of Geo-replication, and the stale/duplicate documentation has been removed. We are still working on adding Troubleshooting and Cluster expand/shrink notes to it. The latest version of the documentation is available here: http://docs.gluster.org/en/latest/Administrator%20Guide/Geo%20Replication

+

Geo-replication Events are available for Events API consumers:

+

Events APIs is a new Gluster feature available with the 3.9 release; most of the events from Geo-replication have been added to eventsapi.

+

Read more about the Events APIs and Geo-replication events here: http://docs.gluster.org/en/latest/Administrator%20Guide/Events%20APIs

+

New simplified command to setup Non root Geo-replication

+

Non root Geo-replication setup was not easy, with multiple manual steps. The non root Geo-replication steps have been simplified. Read more about the new steps in the Admin Guide:

+

http://docs.gluster.org/en/latest/Administrator%20Guide/Geo%20Replication/#slave-user-setup

+

New command to generate SSH keys (alternative command to gsec_create)

+

The gluster system:: execute gsec_create command generates ssh keys on every Master cluster node and copies them to the initiating node. This command silently ignores errors if any node is down in the cluster, and it will not collect SSH keys from that node. When the Geo-rep create push-pem command is issued, it copies public keys only from those nodes which were up during gsec_create. This causes Geo-rep to go Faulty when that master node tries to make the connection to slave nodes. With the new command, the output shows if any Master node was down while generating ssh keys. Read more about gluster-georep-sshkey here:

+

http://docs.gluster.org/en/latest/Administrator%20Guide/Geo%20Replication/#setting-up-the-environment-for-geo-replication
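For illustration (a sketch; the generate sub-command is an assumption based on the admin guide linked above and should be verified against the installed version):

    # generate and distribute SSH keys from all master nodes, reporting any node that is down
    gluster-georep-sshkey generate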

+

Logging improvements

+

New logs have been added; now from the log we can clearly understand what is going on. Note: This feature may change the logging format of existing log messages, so please update your parsers if they are used to parse Geo-rep logs.

+

Patch: http://review.gluster.org/15710

+

New Configuration options available: changelog-log-level

+

All the changelog related log messages are logged in /var/log/glusterfs/geo-replication/<SESSION>/*.changes.log on Master nodes. The log level was hard coded as TRACE for Changelog logs. A new configuration option is provided to modify the changelog log level, defaulting to INFO.

+

Behavior changes

+
    +
  • #1221623: Earlier, the ports GlusterD used to allocate for daemons like brick processes, quotad, shd et al. were persistent through the volume's life cycle, so every restart of the process(es) or a node reboot would try to use the same ports which were allocated for the first time. From release-3.9 onwards, GlusterD will try to allocate a fresh port once a daemon is restarted or the node is rebooted.
  • +
  • #1348944: with the 3.9 release the default log file for glusterd has been renamed to glusterd.log from etc-glusterfs-glusterd.vol.log
  • +
+

Known Issues

+
    +
  • #1387878: add-brick on a vm-store configuration which has sharding enabled is leading to vm corruption. To work around this issue, one can scale up by creating more volumes until this issue is fixed.
  • +
+

Bugs addressed

+

A total of 571 patches have been sent, addressing 422 bugs:

+
    +
  • #762184: Support mandatory locking in glusterfs
  • +
  • #789278: Issues reported by Coverity static analysis tool
  • +
  • #1005257: [PATCH] Small typo fixes
  • +
  • #1175711: posix: Set correct d_type for readdirp() calls
  • +
  • #1193929: GlusterFS can be improved
  • +
  • #1198849: Minor improvements and cleanup for the build system
  • +
  • #1200914: pathinfo is wrong for striped replicated volumes
  • +
  • #1202274: Minor improvements and code cleanup for libgfapi
  • +
  • #1207604: [rfe] glusterfs snapshot cli commands should provide xml output.
  • +
  • #1211863: RFE: Support in md-cache to use upcall notifications to invalidate its cache
  • +
  • #1221623: glusterd: add brick command should re-use the port for listening which is freed by remove-brick.
  • +
  • #1222915: usage text is wrong for use-readdirp mount default
  • +
  • #1223937: Outdated autotools helper config.* files
  • +
  • #1225718: [FEAT] DHT - rebalance - rebalance status o/p should be different for 'fix-layout' option, it should not show 'Rebalanced-files' , 'Size', 'Scanned' etc as it is not migrating any files.
  • +
  • #1227667: Minor improvements and code cleanup for protocol server/client
  • +
  • #1228142: clang-analyzer: adding clang static analysis support
  • +
  • #1231224: Misleading error messages on brick logs while creating directory (mkdir) on fuse mount
  • +
  • #1236009: do an explicit lookup on the inodes linked in readdirp
  • +
  • #1254067: remove unused variables
  • +
  • #1266876: cluster/afr: AFR2 returns empty readdir results to clients if brick is added back into cluster after re-imaging/formatting
  • +
  • #1278325: DHT: Once remove brick start failed in between Remove brick commit should not be allowed
  • +
  • #1285152: store afr pending xattrs as a volume option
  • +
  • #1292020: quota: client gets IO error instead of disk quota exceed when the limit is exceeded
  • +
  • #1294813: [geo-rep]: Multiple geo-rep session to the same slave is allowed for different users
  • +
  • #1296043: Wrong usage of dict functions
  • +
  • #1302277: Wrong XML output for Volume Options
  • +
  • #1302948: tar complains: : file changed as we read it
  • +
  • #1303668: packaging: rpmlint warning and errors - Documentation URL 404
  • +
  • #1305031: AFR winds a few reads of a file in metadata split-brain.
  • +
  • #1306398: Tiering and AFR may result in data loss
  • +
  • #1311002: NFS+attach tier:IOs hang while attach tier is issued
  • +
  • #1311926: [georep]: If a georep session is recreated the existing files which are deleted from slave doesn't get sync again from master
  • +
  • #1315666: Data Tiering:tier volume status shows as in-progress on all nodes of a cluster even if the node is not part of volume
  • +
  • #1316178: changelog/rpc: Memory leak- rpc_clnt_t object is never freed
  • +
  • #1316389: georep: tests for logrotate, create+rename and hard-link rename
  • +
  • #1318204: Input / Output when chmoding files on NFS mount point
  • +
  • #1318289: [RFE] Add arbiter brick hotplug
  • +
  • #1318591: Glusterd not operational due to snapshot conflicting with nfs-ganesha export file in "/var/lib/glusterd/snaps"
  • +
  • #1319992: RFE: Lease support for gluster
  • +
  • #1320388: [GSS]-gluster v heal volname info does not work with enabled ssl/tls
  • +
  • #1321836: gluster volume info --xml returns 0 for nonexistent volume
  • +
  • #1322214: [HC] Add disk in a Hyper-converged environment fails when glusterfs is running in directIO mode
  • +
  • #1322805: [scale] Brick process does not start after node reboot
  • +
  • #1322825: IO-stats, client profile is overwritten when it is on the same node as bricks
  • +
  • #1324439: SAMBA+TIER : Wrong message display.On detach tier success the message reflects Tier command failed.
  • +
  • #1325831: gluster snap status xml output shows incorrect details when the snapshots are in deactivated state
  • +
  • #1326410: /var/lib/glusterd/$few-directories not owned by any package, causing it to remain after glusterfs-server is uninstalled
  • +
  • #1327171: Disperse: Provide description of disperse.eager-lock option.
  • +
  • #1328224: RFE : Feature: Automagic unsplit-brain policies for AFR
  • +
  • #1329211: values for Number of Scrubbed files, Number of Unsigned files, Last completed scrub time and Duration of last scrub are shown as zeros in bit rot scrub status
  • +
  • #1330032: rm -rf to a dir gives directory not empty(ENOTEMPTY) error
  • +
  • #1330097: ganesha exported volumes doesn't get synced up on shutdown node when it comes up.
  • +
  • #1330583: glusterfs-libs postun ldconfig: relative path `1' used to build cache
  • +
  • #1331254: Disperse volume fails on high load and logs show some assertion failures
  • +
  • #1331287: No xml output on gluster volume heal info command with --xml
  • +
  • #1331323: [Granular entry sh] - Implement renaming of indices in index translator
  • +
  • #1331423: distaf: Add io_libs to namespace package list
  • +
  • #1331720: implement meta-lock/unlock for lock migration
  • +
  • #1331721: distaf: Add README and HOWTO to distaflibs as well
  • +
  • #1331860: Wrong constant used in length based comparison for XATTR_SECURITY_PREFIX
  • +
  • #1331969: Ganesha+Tiering: Continuous "0-glfs_h_poll_cache_invalidation: invalid argument" messages getting logged in ganesha-gfapi logs.
  • +
  • #1332020: multiple regression failures for tests/basic/quota-ancestry-building.t
  • +
  • #1332021: multiple failures for testcase: tests/basic/inode-quota-enforcing.t
  • +
  • #1332054: multiple failures of tests/bugs/disperse/bug-1236065.t
  • +
  • #1332073: EINVAL errors while aggregating the directory size by quotad
  • +
  • #1332134: bitrot: Build generates Compilation Warning.
  • +
  • #1332136: Detach tier fire before the background fixlayout is complete may result in failure
  • +
  • #1332156: SMB:while running I/O on cifs mount and doing graph switch causes cifs mount to hang.
  • +
  • #1332219: tier: avoid pthread_join if pthread_create fails
  • +
  • #1332413: Wrong op-version for mandatory-locks volume set option
  • +
  • #1332419: geo-rep: address potential leak of memory
  • +
  • #1332460: [features/worm] - when disabled, worm xl should simply pass requested fops to its child xl
  • +
  • #1332465: glusterd + bitrot : Creating clone of snapshot. error "xlator.c:148:xlator_volopt_dynload] 0-xlator: /usr/lib64/glusterfs/3.7.9/xlator/features/bitrot.so: cannot open shared object file:
  • +
  • #1332473: tests: 'tests/bitrot/br-state-check.t' fails in netbsd
  • +
  • #1332501: Mandatory locks are not migrated during lock migration
  • +
  • #1332566: [granular entry sh] - Add more tests
  • +
  • #1332798: [AFR]: "volume heal info" command is failing during in-service upgrade to latest.
  • +
  • #1332822: distaf: Add library functions for gluster snapshot operations
  • +
  • #1332885: distaf: Add library functions for gluster bitrot operations and generic library utility functions generic to all components
  • +
  • #1332952: distaf: Add library functions for gluster quota operations
  • +
  • #1332994: Self Heal fails on a replica3 volume with 'disk quota exceeded'
  • +
  • #1333023: readdir-ahead does not fetch xattrs that md-cache needs in it's internal calls
  • +
  • #1333043: Fix excessive logging due to NULL dict in dht
  • +
  • #1333263: [features/worm] Unwind FOPs with op_errno and add gf_worm prefix to functions
  • +
  • #1333317: rpc_clnt will sometimes not reconnect when using encryption
  • +
  • #1333319: Unexporting a volume sometimes fails with "Dynamic export addition/deletion failed".
  • +
  • #1333370: [FEAT] jbr-server handle lock/unlock fops
  • +
  • #1333738: distaf: Add GlusterBaseClass (gluster_base_class.py) to distaflibs-gluster.
  • +
  • #1333912: client ID should logged when SSL connection fails
  • +
  • #1333925: libglusterfs: race conditions and illegal mem access in timer
  • +
  • #1334044: [RFE] Eventing for Gluster
  • +
  • #1334164: Worker dies with [Errno 5] Input/output error upon creation of entries at slave
  • +
  • #1334208: distaf: Add library functions for gluster rebalance operations
  • +
  • #1334269: GlusterFS 3.8 fails to build in the CentOS Community Build System
  • +
  • #1334270: glusterd: glusterd provides stale port information when a volume is recreated with same brick path
  • +
  • #1334285: Under high read load, sometimes the message "XDR decoding failed" appears in the logs and read fails
  • +
  • #1334314: changelog: changelog_rollover breaks when number of fds opened is more than 1024
  • +
  • #1334444: SAMBA-VSS : Permission denied issue while restoring the directory from windows client 1 when files are deleted from windows client 2
  • +
  • #1334620: stop all gluster processes should also include glusterfs mount process
  • +
  • #1334621: set errno in case of inode_link failures
  • +
  • #1334721: distaf: Add library functions for gluster tiering operations
  • +
  • #1334839: [Tiering]: Files remain in hot tier even after detach tier completes
  • +
  • #1335019: Add graph for decompounder xlator
  • +
  • #1335091: mount/fuse: Logging improvements
  • +
  • #1335231: features/locks: clang compile warning in posix.c
  • +
  • #1335232: features/index: clang compile warnings in index.c
  • +
  • #1335429: Self heal shows different information for the same volume from each node
  • +
  • #1335494: Modifying peer ops library
  • +
  • #1335531: Modified volume options are not syncing once glusterd comes up.
  • +
  • #1335652: Heal info shows split-brain for .shard directory though only one brick was down
  • +
  • #1335717: PREFIX is not honoured during build and install
  • +
  • #1335776: rpc: change client insecure port ceiling from 65535 to 49151
  • +
  • #1335818: Revert "features/shard: Make o-direct writes work with sharding: http://review.gluster.org/#/c/13846/"
  • +
  • #1335858: Files present in the .shard folder even after deleting all the vms from the UI
  • +
  • #1335973: [Tiering]: The message 'Max cycle time reached..exiting migration' incorrectly displayed as an 'error' in the logs
  • +
  • #1336197: failover is not working with latest builds.
  • +
  • #1336328: [FEAT] jbr: Improve code modularity
  • +
  • #1336354: Provide a way to configure gluster source location in devel-vagrant
  • +
  • #1336373: Distaf: Add gluster specific config file
  • +
  • #1336381: ENOTCONN error during parallel rmdir
  • +
  • #1336508: rpc-transport: compiler warning format string
  • +
  • #1336612: one of vm goes to paused state when network goes down and comes up back
  • +
  • #1336630: ERROR and Warning message on writing a file from mount point "null gfid for path (null)" repeated 3 times between"
  • +
  • #1336642: [RFE] git-branch-diff: wrapper script for git to visualize backports
  • +
  • #1336698: DHT : few Files are not accessible and not listed on mount + more than one Directory have same gfid + (sometimes) attributes has ?? in ls output after renaming Directories from multiple client at same time
  • +
  • #1336793: assorted typos and spelling mistakes from Debian lintian
  • +
  • #1336818: Add ability to set oom_score_adj for glusterfs process
  • +
  • #1336853: scripts: bash-isms in scripts
  • +
  • #1336945: [NFS-Ganesha] : stonith-enabled option not set with new versions of cman,pacemaker,corosync and pcs
  • +
  • #1337160: distaf: Added libraries to setup nfs-ganesha in gluster through distaf
  • +
  • #1337227: [tiering]: error message shown during the failure of detach tier commit isn't intuitive
  • +
  • #1337405: Some of VMs go to paused state when there is concurrent I/O on vms
  • +
  • #1337473: upgrade path when slave volume uuid used in geo-rep session
  • +
  • #1337597: Mounting a volume over NFS with a subdir followed by a / returns "Invalid argument"
  • +
  • #1337650: log flooded with Could not map name=xxxx to a UUID when config'd with long hostnames
  • +
  • #1337777: tests/bugs/write-behind/1279730.t fails spuriously
  • +
  • #1337791: tests/basic/afr/tarissue.t fails regression
  • +
  • #1337899: Misleading error message on rebalance start when one of the glusterd instance is down
  • +
  • #1338544: fuse: In fuse_first_lookup(), dict is not un-referenced in case create_frame returns an empty pointer.
  • +
  • #1338634: AFR : fuse,nfs mount hangs when directories with same names are created and deleted continuously
  • +
  • #1338733: __inode_ctx_put: fix mem leak on failure
  • +
  • #1338967: common-ha: ganesha.nfsd not put into NFS-GRACE after fail-back
  • +
  • #1338991: DHT2: Tracker bug
  • +
  • #1339071: dht/rebalance: mark hardlink migration failure as skipped for rebalance process
  • +
  • #1339149: Error and warning messages related to xlator/features/snapview-client.so adding up to the client log on performing IO operations
  • +
  • #1339166: distaf: Added timeout value to wait for rebalance to complete and removed older rebalance library file
  • +
  • #1339181: Full heal of a sub-directory does not clean up name-indices when granular-entry-heal is enabled.
  • +
  • #1339214: gfapi: set mem_acct for the variables created for upcall
  • +
  • #1339471: [geo-rep]: Worker died with [Errno 2] No such file or directory
  • +
  • #1339472: [geo-rep]: Monitor crashed with [Errno 3] No such process
  • +
  • #1339541: Added libraries to setup CTDB in gluster through distaf
  • +
  • #1339553: gfapi: in case of handle based APIs, close glfd after successful create
  • +
  • #1339689: RFE - capacity info (df -h on a mount) is incorrect for a tiered volume
  • +
  • #1340488: copy-export-ganesha.sh does not have a correct shebang
  • +
  • #1340623: Directory creation(mkdir) fails when the remove brick is initiated for replicated volumes accessing via nfs-ganesha
  • +
  • #1340853: [geo-rep]: If the session is renamed, geo-rep configuration are not retained
  • +
  • #1340936: Automount fails because /sbin/mount.glusterfs does not accept the -s option
  • +
  • #1341007: gfapi : throwing warning message for unused variable in glfs_h_find_handle()
  • +
  • #1341009: Log parameters such as the gfid, fd address, offset and length of the reads upon failure for easier debugging
  • +
  • #1341294: build: RHEL7 unpackaged files /var/lib/glusterd/hooks/.../S57glusterfind-delete-post.{pyc,pyo}
  • +
  • #1341474: [geo-rep]: Snapshot creation having geo-rep session is broken
  • +
  • #1341650: conservative merge happening on a x3 volume for a deleted file
  • +
  • #1341768: After setting up ganesha on RHEL 6, nodes remains in stopped state and grace related failures observed in pcs status
  • +
  • #1341796: [quota+snapshot]: Directories are inaccessible from activated snapshot, when the snapshot was created during directory creation
  • +
  • #1342171: O_DIRECT support for sharding
  • +
  • #1342259: [features/worm] - write FOP should pass for the normal files
  • +
  • #1342298: reading file with size less than 512 fails with odirect read
  • +
  • #1342356: [RFE] Python library for creating Cluster aware CLI tools for Gluster
  • +
  • #1342420: [georep]: Stopping volume fails if it has geo-rep session (Even in stopped state)
  • +
  • #1342796: self heal deamon killed due to oom kills on a dist-disperse volume using nfs ganesha
  • +
  • #1342979: [geo-rep]: Add-Brick use case: create push-pem force on existing geo-rep fails
  • +
  • #1343038: IO ERROR when multiple graph switches
  • +
  • #1343286: enabling glusternfs with nfs.rpc-auth-allow to many hosts failed
  • +
  • #1343333: [RFE] Simplify Non Root Geo-replication Setup
  • +
  • #1343374: Gluster fuse client crashed generating core dump
  • +
  • #1343838: Implement API to get page aligned iobufs in iobuf.c
  • +
  • #1343906: [Stress/Scale] : I/O errors out from gNFS mount points during high load on an erasure coded volume,Logs flooded with Error messages.
  • +
  • #1343943: Old documentation link in log during Geo-rep MISCONFIGURATION
  • +
  • #1344277: [disperse] mkdir after re balance give Input/Output Error
  • +
  • #1344340: Unsafe access to inode->fd_list
  • +
  • #1344396: fd leak in disperse
  • +
  • #1344407: fail delete volume operation if one of the glusterd instance is down in cluster
  • +
  • #1344686: tiering : Multiple brick processes crashed on tiered volume while taking snapshots
  • +
  • #1344714: removal of file from nfs mount crashs ganesha server
  • +
  • #1344836: [Disperse volume]: IO hang seen on mount with file ops
  • +
  • #1344885: inode leak in brick process
  • +
  • #1345727: Bricks are starting when server quorum not met.
  • +
  • #1345744: [geo-rep]: Worker crashed with "KeyError: "
  • +
  • #1345748: SAMBA-DHT : Crash seen while rename operations in cifs mount and windows access of share mount
  • +
  • #1345846: quota : rectify quota-deem-statfs default value in gluster v set help command
  • +
  • #1345855: Possible crash due to a timer cancellation race
  • +
  • #1346138: [RFE] Non root Geo-replication Error logs improvements
  • +
  • #1346211: cleanup glusterd-georep code
  • +
  • #1346551: wrong understanding of function's parameter
  • +
  • #1346719: [Disperse] dd + rm + ls lead to IO hang
  • +
  • #1346821: cli core dumped while providing/not wrong values during arbiter replica volume
  • +
  • #1347249: libgfapi : variables allocated by glfs_set_volfile_server is not freed
  • +
  • #1347354: glusterd: SuSE build system error for incorrect strcat, strncat usage
  • +
  • #1347686: IO error seen with Rolling or non-disruptive upgrade of an distribute-disperse(EC) volume from 3.7.5 to 3.7.9
  • +
  • #1348897: Add relative path validation for gluster copy file utility
  • +
  • #1348904: [geo-rep]: If the data is copied from .snaps directory to the master, it doesn't get sync to slave [First Copy]
  • +
  • #1348944: Change the glusterd log file name to glusterd.log
  • +
  • #1349270: ganesha.enable remains on in volume info file even after we disable nfs-ganesha on the cluster.
  • +
  • #1349273: Geo-rep silently ignores config parser errors
  • +
  • #1349276: Buffer overflow when attempting to create filesystem using libgfapi as driver on OpenStack
  • +
  • #1349284: [tiering]: Files of size greater than that of high watermark level should not be promoted
  • +
  • #1349398: nfs-ganesha disable doesn't delete nfs-ganesha folder from /var/run/gluster/shared_storage
  • +
  • #1349657: process glusterd set TCP_USER_TIMEOUT failed
  • +
  • #1349709: Polling failure errors getting when volume is started&stopped with SSL enabled setup.
  • +
  • #1349723: Added libraries to get server_brick dictionaries
  • +
  • #1350017: Change distaf glusterbase class and mount according to the config file changes
  • +
  • #1350168: distaf: made changes to create_volume function
  • +
  • #1350173: distaf: Adding samba_ops library
  • +
  • #1350188: distaf: minor import changes in ganesha.py
  • +
  • #1350191: race condition when set ctx->timer in function gf_timer_registry_init
  • +
  • #1350237: Gluster/NFS does not accept dashes in hostnames in exports/netgroups files
  • +
  • #1350245: distaf: Add library functions for gluster volume operations
  • +
  • #1350248: distaf: Modified get_pathinfo function in lib_utils.py
  • +
  • #1350256: Distaf: Modifying the ctdb_libs to get server host from the server dict
  • +
  • #1350258: Distaf: add a sample test case to the framework
  • +
  • #1350327: Protocol client not mounting volumes running on older versions.
  • +
  • #1350371: ganesha/glusterd : remove 'HA_VOL_SERVER' from ganesha-ha.conf
  • +
  • #1350383: distaf: Modified distaf gluster config file
  • +
  • #1350427: distaf: Modified tier_attach() to get bricks path for attaching tier from the available bricks in server
  • +
  • #1350744: GlusterFS 3.9.0 tracker
  • +
  • #1350793: build: remove absolute paths from glusterfs spec file
  • +
  • #1350867: RFE: FEATURE: Lock revocation for features/locks xlator
  • +
  • #1351021: [DHT]: Rebalance info for remove brick operation is not showing after glusterd restart
  • +
  • #1351071: [geo-rep] Stopped geo-rep session gets started automatically once all the master nodes are upgraded
  • +
  • #1351134: [SSL] : gluster v set help does not show ssl options
  • +
  • #1351537: [Bitrot] Need a way to set scrub interval to a minute, for ease of testing
  • +
  • #1351880: gluster volume status client" isn't showing any information when one of the nodes in a 3-way Distributed-Replicate volume is shut down
  • +
  • #1352019: RFE: Move throttling code to libglusterfs from bitrot
  • +
  • #1352277: a two node glusterfs seems not possible anymore?!
  • +
  • #1352279: [scale]: Bricks not started after node reboot.
  • +
  • #1352423: should find_library("c") be used instead of find_library("libc") in geo-replication/syncdaemon/libcxattr.py?
  • +
  • #1352634: qemu libgfapi clients hang when doing I/O
  • +
  • #1352671: RFE: As a part of xattr invalidation, send the stat info as well
  • +
  • #1352854: GlusterFS - Memory Leak - High Memory Utilization
  • +
  • #1352871: [Bitrot]: Scrub status- Certain fields continue to show previous run's details, even if the current run is in progress
  • +
  • #1353156: [RFE] CLI to get local state representation for a cluster
  • +
  • #1354141: several problems found in failure handle logic
  • +
  • #1354221: noisy compilation warnning with Wstrict-prototypes
  • +
  • #1354372: Fix timing issue in tests/bugs/glusterd/bug-963541.t
  • +
  • #1354439: nfs client I/O stuck post IP failover
  • +
  • #1354489: service file is executable
  • +
  • #1355604: afr coverity fixes
  • +
  • #1355628: Upgrade from 3.7.8 to 3.8.1 doesn't regenerate the volfiles
  • +
  • #1355706: [Bitrot]: Sticky bit files considered and skipped by the scrubber, instead of getting ignored.
  • +
  • #1355956: RFE : move ganesha related configuration into shared storage
  • +
  • #1356032: quota: correct spelling mistakes in quota src files
  • +
  • #1356068: observing " Too many levels of symbolic links" after adding bricks and then issuing a replace brick
  • +
  • #1356504: Move gf_log->gf_msg in index feature
  • +
  • #1356508: [RFE] Handle errors during SSH key generation(gsec_create)
  • +
  • #1356528: memory leak in glusterd-georeplication
  • +
  • #1356851: [Bitrot+Sharding] Scrub status shows incorrect values for 'files scrubbed' and 'files skipped'
  • +
  • #1356868: File not found errors during rpmbuild: /var/lib/glusterd/hooks/1/delete/post/S57glusterfind-delete-post.py{c,o}
  • +
  • #1356888: Correct code in socket.c to avoid fd leak
  • +
  • #1356998: syscalls: readdir_r() is deprecated in newer glibc
  • +
  • #1357210: add several fops support in io-threads
  • +
  • #1357226: add a basis function to reduce verbose code
  • +
  • #1357397: Trash translator fails to create 'internal_op' directory under already existing trash directory
  • +
  • #1357463: Error: quota context not set inode (gfid:nnn) [Invalid argument]
  • +
  • #1357490: libglusterfs : update correct memory segments in glfs-message-id
  • +
  • #1357821: Make install fails second time without uninstall
  • +
  • #1358114: tests: ./tests/bitrot/br-stub.t fails intermittently
  • +
  • #1358195: Fix spurious failure of tests/bugs/glusterd/bug-1111041.t
  • +
  • #1358196: Tiering related core observed with "uuid_is_null () message".
  • +
  • #1358244: [SNAPSHOT]: The PID for snapd is displayed even after snapd process is killed.
  • +
  • #1358594: Enable gfapi test cases in Gluster upstream regression
  • +
  • #1358608: Memory leak observed with upcall polling
  • +
  • #1358671: Add Events for Volume Set and Reset
  • +
  • #1358922: missunderstanding about GF_PROTOCOL_DICT_SERIALIZE
  • +
  • #1358936: coverity: iobuf_get_page_aligned calling iobuf_get2 should check the return pointer
  • +
  • #1358944: jbr resource leak, forget free "path"
  • +
  • #1358976: Fix spurious failures in split-brain-favorite-child-policy.t
  • +
  • #1359001: Fix spurious failures in ec.t
  • +
  • #1359190: Glusterd crashes upon receiving SIGUSR1
  • +
  • #1359370: glfs: fix glfs_set_volfile_server doc
  • +
  • #1359711: [GSS] Rebalance crashed
  • +
  • #1359717: Fix failure of ./tests/bugs/snapshot/bug-1316437.t
  • +
  • #1360169: Fix bugs in compound fops framework
  • +
  • #1360401: RFE: support multiple bricks within one process
  • +
  • #1360402: Clients can starve under heavy load
  • +
  • #1360647: gfapi: deprecate the rdma support for management connections
  • +
  • #1360670: Add output option --xml to man page of gluster
  • +
  • #1360679: Bricks doesn't come online after reboot [ Brick Full ]
  • +
  • #1360682: tests: ./tests/bitrot/bug-1244613.t fails intermittently
  • +
  • #1360693: [RFE] Add a count of snapshots associated with a volume to the output of the vol info command
  • +
  • #1360809: [RFE] Capture events in GlusterD
  • +
  • #1361094: Auto generate header files during Make
  • +
  • #1361249: posix: leverage FALLOC_FL_ZERO_RANGE in zerofill fop
  • +
  • #1361300: Direct io to sharded files fails when on zfs backend
  • +
  • #1361678: thread CPU saturation limiting throughput on write workloads
  • +
  • #1361983: Move USE_EVENTS in gf_events API
  • +
  • #1361999: Remove ganesha xlator code from gluster code base
  • +
  • #1362144: Python library to send Events
  • +
  • #1362151: [libgfchangelog]: If changelogs are not available for the requested time range, no proper error message
  • +
  • #1362397: Mem leak in meta_default_readv in meta xlators
  • +
  • #1362520: Per xlator logging not working
  • +
  • #1362602: [Open SSL] : Unable to mount an SSL enabled volume via SMB v3/Ganesha v4
  • +
  • #1363591: Geo-replication user driven Events
  • +
  • #1363721: [HC]: After bringing down and up of the bricks VM's are getting paused
  • +
  • #1363948: Spurious failure in tests/bugs/glusterd/bug-1089668.t
  • +
  • #1364026: glfs_fini() crashes with SIGSEGV
  • +
  • #1364420: [RFE] History Crawl performance improvement
  • +
  • #1364449: posix: honour fsync flags in posix_do_zerofill
  • +
  • #1364529: api: revert glfs_ipc_xd intended for 4.0
  • +
  • #1365455: [AFR]: Files not available in the mount point after converting Distributed volume type to Replicated one.
  • +
  • #1365489: glfs_truncate missing
  • +
  • #1365506: gfapi: use const qualifier for glfs_*timens()
  • +
  • #1366195: [Bitrot - RFE]: On demand scrubbing option to scrub
  • +
  • #1366222: "heal info --xml" not showing the brick name of offline bricks.
  • +
  • #1366226: Move alloca0 definition to common-utils
  • +
  • #1366284: fix bug in protocol/client lookup callback
  • +
  • #1367258: Log EEXIST errors at DEBUG level
  • +
  • #1367478: Second gluster volume is offline after daemon restart or server reboot
  • +
  • #1367527: core: use for makedev(3), major(3), minor(3)
  • +
  • #1367665: rotated FUSE mount log is using to populate the information after log rotate.
  • +
  • #1367771: Introduce graceful mode in stop-all-gluster-processes.sh
  • +
  • #1367774: Support for Client side Events
  • +
  • #1367815: [Bitrot - RFE]: Bitrot Events
  • +
  • #1368042: make fails if Events APIs are disabled
  • +
  • #1368349: tests/bugs/cli/bug-1320388.t: Infrequent failures
  • +
  • #1368451: [RFE] Implement multi threaded self-heal for ec volumes
  • +
  • #1368842: Applications not calling glfs_h_poll_upcall() have upcall events cached for no use
  • +
  • #1368882: log level set in glfs_set_logging() does not work
  • +
  • #1368931: [ RFE] Quota Events
  • +
  • #1368953: spurious netbsd run failures in tests/basic/glusterd/volfile_server_switch.t
  • +
  • #1369124: fix unused variable warnings from out-of-tree builds generate XDR headers and source files i...
  • +
  • #1369331: Memory leak with a replica 3 arbiter 1 configuration
  • +
  • #1369401: NetBSD hangs at /tests/features/lock_revocation.t
  • +
  • #1369430: Track the client that performed readdirp
  • +
  • #1369432: IATT cache invalidation should be sent when permission changes on file
  • +
  • #1369524: segment fault while join thread reaper_thr in fini()
  • +
  • #1369530: protocol/server: readlink rsp xdr failed while readlink got an error
  • +
  • #1369638: DHT stale layout issue will be seen often with md-cache prolonged cache of lookups
  • +
  • #1369721: EventApis will not work if compiled using ./configure --disable-glupy
  • +
  • #1370053: fix EXPECT_WITHIN
  • +
  • #1370074: Fix mistakes in self-heald.t
  • +
  • #1370406: build: eventtypes.h is missing
  • +
  • #1370445: Geo-replication server side events
  • +
  • #1370862: dht: fix the broken build
  • +
  • #1371541: Spurious regressions in ./tests/bugs/gfapi/bug-1093594.t
  • +
  • #1371543: Add cache invalidation stat in profile info
  • +
  • #1371775: gluster system:: uuid get hangs
  • +
  • #1372278: [RFE] Provide snapshot events for the new eventing framework
  • +
  • #1372586: Fix the test case http://review.gluster.org/#/c/15385/
  • +
  • #1372686: [RFE]Reducing number of network round trips
  • +
  • #1373529: Node remains in stopped state in pcs status with "/usr/lib/ocf/resource.d/heartbeat/ganesha_mon: line 137: [: too many arguments ]" messages in logs.
  • +
  • #1373735: Event pushed even if Answer is No in the Volume Stop and Delete prompt
  • +
  • #1373740: [RFE]: events from protocol server
  • +
  • #1373743: [RFE]: AFR events
  • +
  • #1374153: [RFE] History Crawl performance improvement
  • +
  • #1374167: disperse: Integrate important events with events framework
  • +
  • #1374278: rpc/xdr: generated files are filtered with a sed extended regex
  • +
  • #1374298: "gluster vol status all clients --xml" doesn't generate xml if there is a failure in between
  • +
  • #1374324: [RFE] Tier Events
  • +
  • #1374567: [Bitrot]: Recovery fails of a corrupted hardlink (and the corresponding parent file) in a disperse volume
  • +
  • #1374581: Geo-rep worker Faulty with OSError: [Errno 21] Is a directory
  • +
  • #1374597: [geo-rep]: AttributeError: 'Popen' object has no attribute 'elines'
  • +
  • #1374608: geo-replication *changes.log does not respect the log-level configured
  • +
  • #1374626: Worker crashes with EINVAL errors
  • +
  • #1374630: [geo-replication]: geo-rep Status is not showing bricks from one of the nodes
  • +
  • #1374639: glusterfs: create a directory with 0464 mode return EIO error
  • +
  • #1374649: Support for rc.d and init for Service management
  • +
  • #1374841: Implement SIMD support on EC
  • +
  • #1375042: bug-963541.t spurious failure
  • +
  • #1375537: gf_event python fails with ImportError
  • +
  • #1375543: [geo-rep]: defunct tar process while using tar+ssh sync
  • +
  • #1375570: Detach tier commit is allowed when detach tier start goes into failed state
  • +
  • #1375914: posix: Integrate important events with events framework
  • +
  • #1376331: Rpm installation fails with conflicts error for eventsconfig.json file
  • +
  • #1376396: /var/tmp/rpm-tmp.KPCugR: line 2: /bin/systemctl: No such file or directory
  • +
  • #1376477: [RFE] DHT Events
  • +
  • #1376874: RFE : move ganesha related configuration into shared storage
  • +
  • #1377288: The GlusterFS Callback RPC-calls always use RPC/XID 42
  • +
  • #1377386: glusterd experiencing repeated connect/disconnect messages when shd is down
  • +
  • #1377570: EC: Set/unset dirty flag for all the update operations
  • +
  • #1378814: Files not being opened with o_direct flag during random read operation (Glusterfs 3.8.2)
  • +
  • #1378948: removal of file from nfs mount crashes ganesha server
  • +
  • #1379028: Modifications to AFR Events
  • +
  • #1379287: warning messages seen in glusterd logs for each 'gluster volume status' command
  • +
  • #1379528: Poor smallfile read performance on Arbiter volume compared to Replica 3 volume
  • +
  • #1379707: gfapi: Fix fd ref leaks
  • +
  • #1379996: Volume restart couldn't re-export the volume exported via ganesha.
  • +
  • #1380252: glusterd fails to start without installing glusterfs-events package
  • +
  • #1383591: glfs_realpath() should not return malloc()'d allocated memory
  • +
  • #1383692: GlusterFS fails to build on old Linux distros with linux/oom.h missing
  • +
  • #1383913: spurious heal info as pending heal entries never end on an EC volume while IOs are going on
  • +
  • #1385224: arbiter volume write performance is bad with sharding
  • +
  • #1385236: invalid argument warning messages seen in fuse client logs 2016-09-30 06:34:58.938667] W [dict.c:418:dict_set] (-->/usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x58722) 0-dict: !this || !value for key=link-count [Invalid argument]
  • +
  • #1385451: "nfs.disable: on" is not showing in Vol info by default for the 3.7.x volumes after updating to 3.9.0
  • +
  • #1386072: Spurious permission denied problems observed
  • +
  • #1386178: eventsapi/georep: Events are not available for Checkpoint and Status Change
  • +
  • #1386338: pmap_signin event fails to update brickinfo->signed_in flag
  • +
  • #1387099: Boolean attributes are published as string
  • +
  • #1387492: Error and warning message getting while removing glusterfs-events package
  • +
  • #1387502: Incorrect volume type in the "glusterd_state" file generated using CLI "gluster get-state"
  • +
  • #1387564: [Eventing]: UUID is showing zeros in the event message for the peer probe operation.
  • +
  • #1387894: Regression caused by enabling client-io-threads by default
  • +
  • #1387960: Sequential volume start&stop is failing with SSL enabled setup.
  • +
  • #1387964: [Eventing]: 'gluster vol bitrot scrub ondemand' does not produce an event
  • +
  • #1387975: Continuous warning messages getting when one of the cluster node is down on SSL setup.
  • +
  • #1387981: [Eventing]: 'gluster volume tier start force' does not generate a TIER_START event
  • +
  • #1387984: Add a test script for compound fops changes in AFR
  • +
  • #1387990: [RFE] Geo-replication Logging Improvements
  • +
  • #1388150: geo-replica slave node goes faulty for non-root user session due to fail to locate gluster binary
  • +
  • #1388323: fuse mount point not accessible
  • +
  • #1388350: Memory Leaks in snapshot code path
  • +
  • #1388470: throw warning to show that older tier commands are depricated and will be removed.
  • +
  • #1388563: [Eventing]: 'VOLUME_REBALANCE' event messages have an incorrect volume name
  • +
  • #1388579: crypt: changes needed for openssl-1.1 (coming in Fedora 26)
  • +
  • #1388731: [GSS]glusterfind pre session hangs indefinitely in RHGS 3.1.3
  • +
  • #1388912: glusterfs can't self heal character dev file for invalid dev_t parameters
  • +
  • #1389675: Experimental translators and 4.0 features need to be disabled for release-3.9
  • +
  • #1389742: build: incorrect Requires: for portblock resource agent
  • +
  • #1390837: write-behind: flush stuck by former failed write
  • +
  • #1391448: md-cache: Invalidate cache entry in case of OPEN with O_TRUNC
  • +
  • #1392286: gfapi clients crash while using async calls due to double fd_unref
  • +
  • #1392718: Quota version not changing in the quota.conf after upgrading to 3.7.1 from 3.6.1
  • +
  • #1392844: Hosted Engine VM paused post replace-brick operation
  • +
  • #1392869: The FUSE client log is filling up with posix_acl_default and posix_acl_access messages
  • +
\ No newline at end of file
diff --git a/release-notes/4.0.0/index.html b/release-notes/4.0.0/index.html
new file mode 100644
index 00000000..38063dc4
--- /dev/null
+++ b/release-notes/4.0.0/index.html
@@ -0,0 +1,5929 @@

Release notes for Gluster 4.0.0

+

The Gluster community celebrates 13 years of development with this latest +release, Gluster 4.0. This release enables improved integration with containers, +an enhanced user experience, and a next-generation management framework. +The 4.0 release helps cloud-native app developers choose Gluster as the default +scale-out distributed file system.

+

A selection of the important features and changes is documented on this page. A full list of bugs that have been addressed is included further below.

+ +

Announcements

+
  1. As 3.13 was a short term maintenance release, features which have been included in that release are available with 4.0.0 as well. These features may be of interest to users upgrading to 4.0.0 from releases older than 3.13. The 3.13 release notes capture the list of features that were introduced with 3.13.

     NOTE: As 3.13 was a short term maintenance release, it will reach end of life (EOL) with the release of 4.0.0. (reference)

  2. Releases that receive maintenance updates post the 4.0 release are 3.10, 3.12, and 4.0. (reference)

  3. With this release, the CentOS storage SIG will not build server packages for CentOS6. Server packages will be available for CentOS7 only. For ease of migration, client packages on CentOS6 will be published and maintained.

     NOTE: This change was announced here.

+

Major changes and features

+

Features are categorized into the following sections,

  • Management
  • Monitoring
  • Performance
  • Geo-replication
  • Standalone
  • Developer related

Management

+

GlusterD2 (GD2) is the new management daemon for Gluster-4.0. It is a complete rewrite, with all-new internal core frameworks, that makes it more scalable, easier to integrate with, and lower in maintenance requirements.

+

A quick start guide is available to get started with GD2.

+

GD2 in Gluster-4.0 is a technical preview release. It is not recommended for +production use. For the current release glusterd is the preferred management +daemon. More information is available in the Limitations section.

+

GD2 brings many new changes and improvements that affect both users and developers.

+

Features

+

The most significant new features brought by GD2 are below.

+
Native REST APIs
+

GD2 exposes all of its management functionality via ReST APIs. The ReST APIs +accept and return data encoded in JSON. This enables external projects such as +Heketi to be better integrated with GD2.
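
As a rough, hedged sketch only (the port and the /v1/volumes endpoint path are assumptions for illustration, not taken from this document; consult the GD2 ReST API reference for the actual routes), listing volumes over the ReST API could look like:

# curl -s -H 'Accept: application/json' http://<gd2-host>:<gd2-port>/v1/volumes

The JSON response can then be consumed by external tooling such as Heketi or custom scripts.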

+
CLI
+

GD2 provides a new CLI, glustercli, built on top of the ReST API. The CLI +retains much of the syntax of the old gluster command. In addition we have,

+
    +
  • Improved CLI help messages
  • +
  • Auto completion for sub commands
  • +
  • Improved CLI error messages on failure
  • +
  • Framework to run glustercli from outside the Cluster.
  • +
+

In this release, the following CLI commands are available,

+
  • Peer management
      • Peer Probe/Attach
      • Peer Detach
      • Peer Status
  • Volume Management
      • Create/Start/Stop/Delete
      • Expand
      • Options Set/Get
  • Bitrot
      • Enable/Disable
      • Configure
      • Status
  • Geo-replication
      • Create/Start/Pause/Resume/Stop/Delete
      • Configure
      • Status
Configuration store
+

GD2 uses etcd to store the Gluster pool configuration, which solves the configuration synchronization issues reported against the Gluster management daemon.

+

GD2 embeds etcd, and automatically creates and manages an etcd cluster when +forming the trusted storage pool. If required, GD2 can also connect to an +already existing etcd cluster.

+
Transaction Framework
+

GD2 brings a newer, more flexible distributed framework to help it perform actions across the storage pool. The transaction framework provides better control for choosing peers for a Gluster operation, and it also provides a mechanism to roll back the changes when something goes wrong.

+
Volume Options
+

GD2 intelligently fetches and builds the list of volume options by directly reading the xlators' *.so files. It performs the required validations during volume set without maintaining a duplicate list of options. This avoids many issues that can happen due to a mismatch in the information between Glusterd and the xlator shared libraries.

+

Volume options listing is also improved, to clearly distinguish configured +options and default options. Work is still in progress to categorize these +options and tune the list for better understanding and ease of use.

+
Volfiles generation and management
+

GD2 has a newer and better structured way for developers to define volfile +structure. The new method reduces the effort required to extend graphs or add +new graphs.

+

Also, volfiles are generated on a single peer and stored in the etcd store. This is very important for scalability, since volfiles are no longer stored on every node.

+
Security
+

GD2 supports TLS for ReST and internal communication, and authentication for the ReST API. If enabled, ReST API access is currently limited to the CLI, or to users who have access to the token present in the $GLUSTERD2_WORKDIR/auth file.

+
Features integration - Self Heal
+

The self-heal feature is integrated for new volumes created using Glusterd2.

+
Geo-replication
+

With GD2 integration, Geo-replication setup becomes very easy. If the Master and Remote volumes are available and running, Geo-replication can be set up with just a single command.

+
glustercli geo-replication create <mastervol> <remotehost>::<remotevol>
+
+

Geo-replication status is improved; the status output now clearly distinguishes the details of multiple sessions.

+

The order of status rows was not predictable in earlier releases, which made it very difficult to correlate the Geo-replication status with bricks. With this release, Master worker status rows will always match the brick list in volume info.

+

Status can be checked using,

+
glustercli geo-replication status
+glustercli geo-replication status <mastervol> <remotehost>::<remotevol>
+
+

All the other commands are available as usual.

+

Limitations:

+
    +
  • On Remote nodes, Geo-replication does not yet create the log directories. As a workaround, create the required log directories on the Remote Volume nodes.
  • +
+
Events APIs
+

The Events API feature is integrated with GD2. Webhooks can be registered to listen for GlusterFS events. Work is in progress to expose a ReST API for viewing all the events that happened in the last 15 minutes.

+

Limitations

+
Backward compatibility
+

GD2 is not backwards compatible with the older GlusterD. Heterogeneous clusters +running both GD2 and GlusterD are not possible.

+

GD2 retains compatibility with Gluster-3.x clients. Old clients will still be +able to mount and use volumes exported using GD2.

+
Upgrade and migration
+

GD2 does not support upgrade from Gluster-3.x releases, in Gluster-4.0. +Gluster-4.0 will be shipping with both GD2 and the existing GlusterD. Users will +be able to upgrade to Gluster-4.0 while continuing to use GlusterD.

+

In Gluster-4.1, users will be able to migrate from GlusterD to GD2. Further, +upgrades from Gluster-4.1 running GD2 to higher Gluster versions would be +supported from release 4.1 onwards.

+

Post Gluster-4.1, GlusterD would be maintained for a couple of releases, post +which the only option to manage the cluster would be GD2.

+
Missing and partial commands
+

Not all commands from GlusterD have been implemented for GD2, and some have been only partially implemented. This means not all GlusterFS features are available in GD2. We aim to bring most of the commands back in Gluster-4.1.

+
Recovery from full shutdown
+

With GD2, recovering from a full cluster shutdown requires reading the available documentation as well as some expertise.

+

Known Issues

+
2-node clusters
+

GD2 does not work well in 2-node clusters. Two main issues exist in this regard.

+
    +
  • Restarting GD2 fails in 2-node clusters #352
  • +
  • Detach fails in 2-node clusters #332
  • +
+

So it is recommended right now to run GD2 only in clusters of 3 or larger.

+
Other issues
+

Other known issues are currently tracked on GitHub issues. Please file any other issue you find there.

+

Monitoring

+

To date, the absence of support for live monitoring in GlusterFS has constrained the experience for both users and developers. Statedump is useful for debugging, but is too heavy for live monitoring.

+

Further, the existence of debug/io-stats translator was not known to many and +gluster volume profile was not recommended as it impacted performance.

+

In this release, GlusterFS enables a lightweight method to access internal +information and avoids the performance penalty and complexities of previous +approaches.

+

1. Metrics collection across every FOP in every xlator

+

Notes for users: Gluster now has in-built latency measures in the xlator abstraction, thus enabling capture of metrics and usage patterns across workloads.

+

These measures are currently enabled by default.

+

Limitations: +This feature is auto-enabled and cannot be disabled.

+

2. Monitoring support

+

Notes for users: +Currently, the only project which consumes metrics and provides basic +monitoring is glustermetrics, which provides a good idea on how to +utilize the metrics dumped from the processes.

+

Users can send the SIGUSR2 signal to a Gluster process to make it dump its metrics into the /var/run/gluster/metrics/ directory.
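
For example, a minimal sketch (targeting the brick process, glusterfsd, is just one possible choice):

# pkill -SIGUSR2 glusterfsd      # ask the brick processes to dump their metrics
# ls /var/run/gluster/metrics/   # inspect the generated metrics files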

+

Limitations: +Currently core gluster stack and memory management systems provide metrics. A +framework to generate more metrics is present for other translators and core +components. However, additional metrics are not added in this release.

+

Performance

+

1. EC: Make metadata [F]GETXATTR operations faster

+

Notes for users: The disperse translator has made performance improvements to the [F]GETXATTR operation. Workloads involving heavy use of extended attributes on files and directories will gain from the improvements made.

+

2. Allow md-cache to serve nameless lookup from cache

+

Notes for users: The md-cache translator is enhanced to cache nameless lookups (typically seen with NFS workloads). This helps speed up overall operations on the volume by reducing the number of lookups done over the network. Typical workloads that will benefit from this enhancement are,

+
    +
  • NFS based access
  • +
  • Directory listing with FUSE, when ACLs are enabled
  • +
+

3. md-cache: Allow runtime addition of xattrs to the list of xattrs that md-cache caches

+

Notes for users: md-cache was earlier enhanced to cache Gluster-specific extended attributes of a file or directory. It has now been enhanced to cache user-provided attributes (xattrs) as well.

+

To add specific xattrs to the cache list, use the following command:

+
# gluster volume set <volname> xattr-cache-list "<xattr-name>,<xattr-name>,..."
+
+

Existing options, such as "cache-samba-metadata" "cache-swift-metadata" continue +to function. The new option "xattr-cache-list" appends to the list generated by +the existing options.

+

Limitations: Setting this option overwrites the previous value set for this option. Appending to the existing list of xattrs is not supported with this release.

+

4. Cache last stripe of an EC volume while write is going on

+

Notes for users: +Disperse translator now has the option to retain a write-through cache of the +last write stripe. This helps in improved small append sequential IO patterns +by reducing the need to read a partial stripe for appending operations.

+

To enable this use,

+
# gluster volume set <volname> disperse.stripe-cache <N>
+
+

Where <N> is the number of stripes to cache.

+

5. tie-breaker logic for blocking inodelks/entrylk in SHD

+

Notes for users: Self-heal daemon locking has been enhanced to identify situations where a self-heal daemon is actively working on an inode. This enables other self-heal daemons to proceed with other entries in the queue rather than waiting on a particular entry, thus preventing starvation among self-heal threads.

+

6. Independent eager-lock options for file and directory accesses

+

Notes for users: +A new option named 'disperse.other-eager-lock' has been added to make it +possible to have different settings for regular file accesses and accesses +to other types of files (like directories).

+

By default this option is enabled to ensure the same behavior as the previous +versions. If you have multiple clients creating, renaming or removing files +from the same directory, you can disable this option to improve the performance +for these users while still keeping best performance for file accesses.
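
For instance, to disable it on a volume (a straightforward use of the volume set syntax shown elsewhere in these notes):

# gluster volume set <volname> disperse.other-eager-lock off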

+

7. md-cache: Added an option to cache statfs data

+

Notes for users: +This can be controlled with option performance.md-cache-statfs

+
gluster volume set <volname> performance.md-cache-statfs <on|off>
+
+

8. Improved disperse performance due to parallel xattrop updates

+

Notes for users: +Disperse translator has been optimized to perform xattrop update operation +in parallel on the bricks during self-heal to improve performance.

+

Geo-replication

+

1. Geo-replication: Improve gverify.sh logs

+

Notes for users: gverify.sh is the script that runs during geo-rep session creation to validate the prerequisites. The logs have been improved and their locations changed as follows,

+
  1. The Slave mount log file is changed from <logdir>/geo-replication-slaves/slave.log to <logdir>/geo-replication/gverify-slavemnt.log
  2. The Master mount log file is separated from the slave log file, under <logdir>/geo-replication/gverify-mastermnt.log

2. Geo-rep: Cleanup stale (unusable) XSYNC changelogs.

+

Notes for users: +Stale xsync logs were not cleaned up, causing accumulation of these on the +system. This change cleans up the stale xsync logs, if geo-replication has to +restart from a faulty state.

+

Standalone

+

1. Ability to force permissions while creating files/directories on a volume

+

Notes for users: Options have been added to the posix translator to override the default umask values with which files and directories are created. This is particularly useful when applications share content based on GID. As the default mode bits prevent such useful sharing, and supersede ACLs in this regard, these options are provided to control this behavior.

+

Command usage is as follows:

+
# gluster volume set <volume name> storage.<option-name> <value>
+
+

The valid <value> ranges from 0000 to 0777

+

<option-name> are:

+
    +
  • create-mask
  • +
  • create-directory-mask
  • +
  • force-create-mode
  • +
  • force-create-directory
  • +
+

Options "create-mask" and "create-directory-mask" are added to remove the +mode bits set on a file or directory when its created. Default value of these +options is 0777. Options "force-create-mode" and "force-create-directory" sets +the default permission for a file or directory irrespective of the clients +umask. Default value of these options is 0000.

+

2. Replace MD5 usage to enable FIPS support

+

Notes for users: Previously, if Gluster was run on a FIPS-enabled system, it used to crash because MD5 is not FIPS compliant and Gluster uses MD5 checksums in various places such as self-heal and geo-replication. By replacing MD5 with the FIPS-compliant SHA256, Gluster no longer crashes on a FIPS-enabled system.

+

However, in order for AFR self-heal to work correctly during rolling upgrade +to 4.0, we have tied this to a volume option called fips-mode-rchecksum.

+

gluster volume set <VOLNAME> fips-mode-rchecksum on has to be performed post upgrade to change the default from MD5 to SHA256. After this, gluster processes will run cleanly on a FIPS-enabled system.
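
That is, once all nodes are upgraded, run the command from the note above on each volume:

# gluster volume set <VOLNAME> fips-mode-rchecksum on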

+

NOTE: Once glusterfs 3.x is EOL'ed, the usage of the option to control this +change will be removed.

+

Limitations: The snapshot feature in Gluster still uses MD5 checksums; hence, running on FIPS-compliant systems requires that the snapshot feature not be used.

+

3. Dentry fop serializer xlator on brick stack

+

Notes for users: This feature strengthens the consistency of the file system, trading some performance for it, and is strongly suggested for workloads where consistency is required.

+

In previous releases, the metadata about the files and directories shared across clients was not always consistent when the use cases/workloads involved a large number of renames and frequent creations and deletions. It does eventually become consistent, but a large proportion of applications are not built to handle eventual consistency.

+

This feature can be enabled as follows,

+
# gluster volume set <volname> features.sdfs enable
+
+

Limitations: +This feature is released as a technical preview, as performance implications are +not known completely.

+

4. Add option to disable nftw() based deletes when purging the landfill directory

+

Notes for users: +The gluster brick processes use an optimized manner of deleting entire sub-trees +using the nftw call. With this release, an option is being added to toggle this +behavior in cases where this optimization is not desired.

+

This is not an exposed option, and needs to be controlled using the volume +graph. Adding the disable-landfill-purge option to the storage/posix translator +helps toggle this feature.

+

The default is always enabled, as in the older releases.
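
A hedged sketch of how this might look in a brick volfile; the volume name, brick path and surrounding options are placeholders, and only the disable-landfill-purge line illustrates the toggle described above:

volume <volname>-posix
    type storage/posix
    option directory /path/to/brick
    option disable-landfill-purge on
end-volume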

5. Option to limit the number of hard links per inode

Notes for users: +Added an option to POSIX that limits the number of hard links that can be +created against an inode (file). This helps when there needs to be a different +hardlink limit than what the local FS provides for the bricks.

+

The option to control this behavior is,

+
# gluster volume set <volname> storage.max-hardlinks <N>
+
+

Where, <N> is 0-0xFFFFFFFF. If the local file system that the brick is using +has a lower limit than this setting, that would be honored.

+

The default is set to 100. Setting this to 0 turns it off and leaves it to the local file system defaults, while setting it to 1 turns off hard links.

+

6. Enhancements for directory listing in readdirp

+

Notes for users: +Prior to this release, rebalance performed a fix-layout on a directory before +healing its subdirectories. If there were a lot of subdirs, it could take a +while before all subdirs were created on the newly added bricks. This led to +some missed directory listings.

+

This is changed with this release to process children directories before the +parents, thereby changing the way rebalance acts (files within sub directories +are migrated first) and also resolving the directory listing issue.

+

7. Rebalance skips migration of file if it detects writes from application

+

Notes for users: The rebalance process skips migration of a file if it detects writes from an application. To force migration even in the presence of application writes to the file, "cluster.force-migration" has to be turned on; it is off by default.

+

The option to control this behavior is,

+
# gluster volume set <volname> cluster.force-migration <on/off>
+
+

Limitations: It is suggested to run remove-brick with cluster.force-migration turned off. This results in files that have writes from clients being skipped during rebalance. It is suggested to copy these files manually to a Gluster mount after the remove-brick commit is performed.

+

Rebalancing files with active write IO to them has a chance of data corruption.

Developer related

1. xlators should not provide init(), fini() and others directly, but have class_methods

+

Notes for developers: This release brings in a new unified manner of defining xlator methods, which avoids certain unwanted side effects of the older method (like always having to define certain symbols) and provides a cleaner single-point registration mechanism for all xlator methods.

+

The new method needs just a single symbol, named xlator_api, to be exposed in the translator code.

+

The elements of this structure are defined here, and an example usage of the same can be seen here.

+

The older mechanism is still supported, but not preferred.

+

2. Framework for distributed testing

+

Notes for developers: +A new framework for running the regression tests for Gluster is added. The +README has details on how to use the same.

+

3. New API for acquiring mandatory locks

+

Notes for developers: The current API for byte-range locks, glfs_posix_lock, doesn't allow applications to specify whether the lock is of the advisory or mandatory type. This change introduces an extended byte-range lock API with an additional argument for the byte-range lock mode, which is one of advisory (the default) or mandatory.

+

Refer to the header for details on how to use this API.

+

A sample test program can be found here that also helps in understanding the +usage of this API.

+

4. New on-wire protocol (XDR) needed to support iattx and cleaner dictionary structure

+

Notes for developers: +With changes in the code to adapt to a newer iatt structure, and stricter data +format enforcement within dictionaries passed across the wire, and also as a +part of reducing technical debt around the RPC layer, this release introduces a +new RPC Gluster protocol version (4.0.0).

+

Typically this does not impact any development, other than to ensure that newer +RPCs that are added would need to be on the 4.0.0 version of the protocol and +dictionaries on the wire need to be better encoded.

+

The newer iatt structure can be viewed here.

+

An example of better encoding dictionary values for wire transfers can be seen +here.

+

Here is some additional information on Gluster RPC programs for the inquisitive.

+

5. The protocol xlators should prevent sending binary values in a dict over the networks

+

Notes for developers: Dict data over the wire in Gluster was previously sent in binary. As the on-wire protocol is also new, this has been changed with this release to send XDR-encoded dict values across. In the future, any new dict type also needs to handle the required XDR encoding of the same.

+

6. Translator to handle 'global' options

+

Notes for developers: A GlusterFS process has around 50 command line arguments. While many of the options are initial settings, many others can change their value over the volume's lifetime. Prior to this release, for many of these options there was no way to change a setting other than restarting the process.

+

With the introduction of global option translator, it is now possible to handle +these options without restarts.

+

If contributing code that adds to the process options, strongly consider adding +the same to the global option translator. An example is provided here.
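As an illustrative sketch only: options handled this way are typically set cluster-wide through the all keyword of the CLI, for example:

# gluster volume set all cluster.brick-multiplex on

Whether a particular option can actually be changed without a process restart depends on whether it has been registered with the global option translator.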

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-3.13.0 are listed below.

+
    +
  • #827334: gfid is not there in the fsetattr and rchecksum requests being sent from protocol client
  • +
  • #1336889: Gluster's XDR does not conform to RFC spec
  • +
  • #1369028: rpc: Change the way client uuid is built
  • +
  • #1370116: Tests : Adding a test to check for inode leak
  • +
  • #1428060: write-behind: Allow trickling-writes to be configurable, fix usage of page_size and window_size
  • +
  • #1430305: Fix memory leak in rebalance
  • +
  • #1431955: [Disperse] Implement open fd heal for disperse volume
  • +
  • #1440659: Add events to notify disk getting fill
  • +
  • #1443145: Free runtime allocated resources upon graph switch or glfs_fini()
  • +
  • #1446381: detach start does not kill the tierd
  • +
  • #1467250: Accessing a file when source brick is down results in that FOP being hung
  • +
  • #1467614: Gluster read/write performance improvements on NVMe backend
  • +
  • #1469487: sys_xxx() functions should guard against bad return values from fs
  • +
  • #1471031: dht_(f)xattrop does not implement migration checks
  • +
  • #1471753: [disperse] Keep stripe in in-memory cache for the non aligned write
  • +
  • #1474768: The output of the "gluster help" command is difficult to read
  • +
  • #1479528: Rebalance estimate(ETA) shows wrong details(as intial message of 10min wait reappears) when still in progress
  • +
  • #1480491: tests: Enable geo-rep test cases
  • +
  • #1482064: Bringing down data bricks in cyclic order results in arbiter brick becoming the source for heal.
  • +
  • #1488103: Rebalance fails on NetBSD because fallocate is not implemented
  • +
  • #1492625: Directory listings on fuse mount are very slow due to small number of getdents() entries
  • +
  • #1496335: Extreme Load from self-heal
  • +
  • #1498966: Test case ./tests/bugs/bug-1371806_1.t is failing
  • +
  • #1499566: [Geo-rep]: Directory renames are not synced in hybrid crawl
  • +
  • #1501054: Structured logging support for Gluster logs
  • +
  • #1501132: posix health check should validate time taken between write timestamp and read timestamp cycle
  • +
  • #1502610: disperse eager-lock degrades performance for file create workloads
  • +
  • #1503227: [RFE] Changelog option in a gluster volume disables with no warning if geo-rep is configured
  • +
  • #1505660: [QUOTA] man page of gluster should be updated to list quota commands
  • +
  • #1506104: gluster volume splitbrain info needs to display output of each brick in a stream fashion instead of buffering and dumping at the end
  • +
  • #1506140: Add quorum checks in post-op
  • +
  • #1506197: [Parallel-Readdir]Warning messages in client log saying 'parallel-readdir' is not recognized.
  • +
  • #1508898: Add new configuration option to manage deletion of Worm files
  • +
  • #1508947: glusterfs: Include path in pkgconfig file is wrong
  • +
  • #1509189: timer: Possible race condition between gftimer* routines
  • +
  • #1509254: snapshot remove does not cleans lvm for deactivated snaps
  • +
  • #1509340: glusterd does not write pidfile correctly when forking
  • +
  • #1509412: Change default versions of certain features to 3.13 from 4.0
  • +
  • #1509644: rpc: make actor search parallel
  • +
  • #1509647: rpc: optimize fop program lookup
  • +
  • #1509845: In distribute volume after glusterd restart, brick goes offline
  • +
  • #1510324: Master branch is broken because of the conflicts
  • +
  • #1510397: Compiler atomic built-ins are not correctly detected
  • +
  • #1510401: fstat returns ENOENT/ESTALE
  • +
  • #1510415: spurious failure of tests/bugs/glusterd/bug-1345727-bricks-stop-on-no-quorum-validation.t
  • +
  • #1510874: print-backtrace.sh failing with cpio version 2.11 or older
  • +
  • #1510940: The number of bytes of the quota specified in version 3.7 or later is incorrect
  • +
  • #1511310: Test bug-1483058-replace-brick-quorum-validation.t fails inconsistently
  • +
  • #1511339: In Replica volume 2*2 when quorum is set, after glusterd restart nfs server is coming up instead of self-heal daemon
  • +
  • #1512437: parallel-readdir = TRUE prevents directories listing
  • +
  • #1512451: Not able to create snapshot
  • +
  • #1512455: glustereventsd hardcodes working-directory
  • +
  • #1512483: Not all files synced using geo-replication
  • +
  • #1513692: io-stats appends now instead of overwriting which floods filesystem with logs
  • +
  • #1513928: call stack group list leaks
  • +
  • #1514329: bug-1247563.t is failing on master
  • +
  • #1515161: Memory leak in locks xlator
  • +
  • #1515163: centos regression fails for tests/bugs/replicate/bug-1292379.t
  • +
  • #1515266: Prevent ec from continue processing heal operations after PARENT_DOWN
  • +
  • #1516206: EC DISCARD doesn't punch hole properly
  • +
  • #1517068: Unable to change the Slave configurations
  • +
  • #1517554: help for volume profile is not in man page
  • +
  • #1517633: Geo-rep: access-mount config is not working
  • +
  • #1517904: tests/bugs/core/multiplex-limit-issue-151.t fails sometimes in upstream master
  • +
  • #1517961: Failure of some regression tests on Centos7 (passes on centos6)
  • +
  • #1518508: Change GD_OP_VERSION to 3_13_0 from 3_12_0 for RFE https://bugzilla.redhat.com/show_bug.cgi?id=1464350
  • +
  • #1518582: Reduce lock contention on fdtable lookup
  • +
  • #1519598: Reduce lock contention on protocol client manipulating fd
  • +
  • #1520245: High mem/cpu usage, brick processes not starting and ssl encryption issues while testing scaling with multiplexing (500-800 vols)
  • +
  • #1520758: [Disperse] Add stripe in cache even if file/data does not exist
  • +
  • #1520974: Compiler warning in dht-common.c because of a switch statement on a boolean
  • +
  • #1521013: rfc.sh should allow custom remote names for ORIGIN
  • +
  • #1521014: quota_unlink_cbk crashes when loc.inode is null
  • +
  • #1521116: Absorb all test fixes from 3.8-fb branch into master
  • +
  • #1521213: crash when gifs_set_logging is called concurrently
  • +
  • #1522651: rdma transport may access an obsolete item in gf_rdma_device_t->all_mr, and causes glusterfsd/glusterfs process crash.
  • +
  • #1522662: Store allocated objects in the mem_acct
  • +
  • #1522775: glusterd consuming high memory
  • +
  • #1522847: gNFS Bug Fixes
  • +
  • #1522950: io-threads is unnecessarily calling accurate time calls on every FOP
  • +
  • #1522968: glusterd bug fixes
  • +
  • #1523295: md-cache should have an option to cache STATFS calls
  • +
  • #1523353: io-stats bugs and features
  • +
  • #1524252: quick-read: Discard cache for fallocate, zerofill and discard ops
  • +
  • #1524365: feature/bitrot: remove internal xattrs from lookup cbk
  • +
  • #1524816: heketi was not removing the LVs associated with Bricks removed when Gluster Volumes were deleted
  • +
  • #1526402: glusterd crashes when 'gluster volume set help' is executed
  • +
  • #1526780: ./run-tests-in-vagrant.sh fails because of disabled Gluster/NFS
  • +
  • #1528558: /usr/sbin/glusterfs crashing on Red Hat OpenShift Container Platform node
  • +
  • #1528975: Fedora 28 (Rawhide) renamed the pyxattr package to python2-pyxattr
  • +
  • #1529440: Files are not rebalanced if destination brick(available size) is of smaller size than source brick(available size)
  • +
  • #1529463: JWT support without external dependency
  • +
  • #1529480: Improve geo-replication logging
  • +
  • #1529488: entries not getting cleared post healing of softlinks (stale entries showing up in heal info)
  • +
  • #1529515: AFR: 3-way-replication: gluster volume set cluster.quorum-count should validate max no. of brick count to accept
  • +
  • #1529883: glusterfind is extremely slow if there are lots of changes
  • +
  • #1530281: glustershd fails to start on a volume force start after a brick is down
  • +
  • #1530910: Use after free in cli_cmd_volume_create_cbk
  • +
  • #1531149: memory leak: get-state leaking memory in small amounts
  • +
  • #1531987: increment of a boolean expression warning
  • +
  • #1532238: Failed to access volume via Samba with undefined symbol from socket.so
  • +
  • #1532591: Tests: Geo-rep tests are failing in few regression machines
  • +
  • #1533594: EC test fails when brick mux is enabled
  • +
  • #1533736: posix_statfs returns incorrect f_bfree values if brick is full.
  • +
  • #1533804: readdir-ahead: change of cache-size should be atomic
  • +
  • #1533815: Mark ./tests/basic/ec/heal-info.t as bad
  • +
  • #1534602: FUSE reverse notificatons are not written to fuse dump
  • +
  • #1535438: Take full lock on files in 3 way replication
  • +
  • #1535772: Random GlusterFSD process dies during rebalance
  • +
  • #1536913: tests/bugs/cli/bug-822830.t fails on Centos 7 and locally
  • +
  • #1538723: build: glibc has removed legacy rpc headers and rpcgen in Fedora28, use libtirpc
  • +
  • #1539657: Georeplication tests intermittently fail
  • +
  • #1539701: gsyncd is running gluster command to get config file path is not required
  • +
  • #1539842: GlusterFS 4.0.0 tracker
  • +
  • #1540438: Remove lock recovery logic from client and server protocol translators
  • +
  • #1540554: Optimize glusterd_import_friend_volume code path
  • +
  • #1540882: Do lock conflict check correctly for wait-list
  • +
  • #1541117: sdfs: crashes if the features is enabled
  • +
  • #1541277: dht_layout_t leak in dht_populate_inode_for_dentry
  • +
  • #1541880: Volume wrong size
  • +
  • #1541928: A down brick is incorrectly considered to be online and makes the volume to be started without any brick available
  • +
  • #1542380: Changes to self-heal logic w.r.t. detecting of split-brains
  • +
  • #1542382: Add quorum checks in post-op
  • +
  • #1542829: Too many log messages about dictionary and options
  • +
  • #1543487: dht_lookup_unlink_of_false_linkto_cbk fails with "Permission denied"
  • +
  • #1543706: glusterd fails to attach brick during restart of the node
  • +
  • #1543711: glustershd/glusterd is not using right port when connecting to glusterfsd process
  • +
  • #1544366: Rolling upgrade to 4.0 is broken
  • +
  • #1544638: 3.8 -> 3.10 rolling upgrade fails (same for 3.12 or 3.13) on Ubuntu 14
  • +
  • #1545724: libgfrpc does not export IPv6 RPC methods even with --with-ipv6-default
  • +
  • #1547635: add option to bulld rpm without server
  • +
  • #1547842: Typo error in __dht_check_free_space function log message
  • +
  • #1548264: [Rebalance] "Migrate file failed: : failed to get xattr [No data available]" warnings in rebalance logs
  • +
  • #1548271: DHT calls dht_lookup_everywhere for 1xn volumes
  • +
  • #1550808: memory leak in pre-op in replicate volumes for every write
  • +
  • #1551112: Rolling upgrade to 4.0 is broken
  • +
  • #1551640: GD2 fails to dlopen server xlator
  • +
  • #1554077: 4.0 clients may fail to convert iatt in dict when recieving the same from older (< 4.0) servers

Release notes for Gluster 4.0.1

+

This is a bugfix release. The release notes for 4.0.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.0 release.

+

Major changes, features and limitations addressed in this release

+

No Major changes

+

Major issues

+

No Major issues

+

Bugs addressed

+

Bugs addressed since release-4.0.0 are listed below.

+
    +
  • #1550946: [brick-mux] performance bottleneck introduced while solving ping timer expiry
  • +
  • #1552404: [CIOT] : Gluster CLI says "io-threads : enabled" on existing volumes post upgrade.
  • +
  • #1554235: Memory corruption is causing crashes, hangs and invalid answers
  • +
  • #1555198: After a replace brick command, self-heal takes some time to start healing files on disperse volumes
  • +
  • #1555309: core: libtirpc, backport XDR macro refactor
  • +
  • #1557906: [EC] Read performance of EC volume exported over gNFS is significantly lower than write performance

Release notes for Gluster 4.0.2

+

This is a bugfix release. The release notes for 4.0.0 and 4.0.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.0 release.

+

Major changes, features and limitations addressed in this release

+

This release contains a fix for a security vulnerability in Gluster as follows,

+
    +
  • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1088
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-1088
  • +
+

Installing the updated packages and restarting gluster services will update the Gluster shared storage volume volfiles, which are more secure than the defaults currently in place.

+

Further, for increased security, the Gluster shared storage volume can be TLS enabled, and access to it can be restricted using the auth.ssl-allow option. See this guide for more details.
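As a hedged sketch only (node names are placeholders, TLS certificates must already be provisioned on the nodes, and the linked guide should be consulted before applying this to a live cluster), enabling TLS on the shared storage volume could look like:

# gluster volume set gluster_shared_storage client.ssl on
# gluster volume set gluster_shared_storage server.ssl on
# gluster volume set gluster_shared_storage auth.ssl-allow '<node1>,<node2>,<node3>'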

+

Major issues

+

No Major issues

+

Bugs addressed

+

Bugs addressed since release-4.0.1 are listed below.

+
    +
  • #1558959: [brick-mux] incorrect event-thread scaling in server_reconfigure()
  • +
  • #1559079: test ./tests/bugs/ec/bug-1236065.t is generating crash on build
  • +
  • #1559244: enable ownthread feature for glusterfs4_0_fop_prog
  • +
  • #1561721: Rebalance failures on a dispersed volume with lookup-optimize enabled
  • +
  • #1562728: SHD is not healing entries in halo replication
  • +
  • #1564461: gfapi: fix a couple of minor issues
  • +
  • #1565654: /var/log/glusterfs/bricks/export_vdb.log flooded with this error message "Not able to add to index [Too many links]"
  • +
  • #1566822: [Remove-brick] Many files were not migrated from the decommissioned bricks; commit results in data loss
  • +
  • #1569403: EIO errors on some operations when volume has mixed brick versions on a disperse volume
  • +
  • #1570432: CVE-2018-1088 glusterfs: Privilege escalation via gluster_shared_storage when snapshot scheduling is enabled [fedora-all]

Release notes for Gluster 4.1.0

+

This is a major release that includes a range of features enhancing management, performance, and monitoring, and that provides newer functionality like thin arbiters, cloud archival, and time consistency. It also contains several bug fixes.

A selection of the important features and changes is documented on this page. A full list of bugs that have been addressed is included further below.

+ +

Announcements

+
    +
  1. As 4.0 was a short term maintenance release, features that were included in that release are available with 4.1.0 as well. These features may be of interest to users upgrading to 4.1.0 from releases older than 4.0. The 4.0 release notes capture the list of features that were introduced with 4.0.

NOTE: As 4.0 was a short term maintenance release, it will reach end of life (EOL) with the release of 4.1.0. (reference)

  2. Releases that receive maintenance updates post the 4.1 release are 3.12 and 4.1. (reference)

NOTE: The 3.10 long term maintenance release will reach end of life (EOL) with the release of 4.1.0. (reference)

  3. Continuing with this release, the CentOS storage SIG will not build server packages for CentOS6. Server packages will be available for CentOS7 only. For ease of migration, client packages on CentOS6 will be published and maintained.

NOTE: This change was announced here.

+

Major changes and features

+

Features are categorized into the following sections.

+ +

Management

+

GlusterD2

+
+

IMP: GlusterD2 in Gluster-4.1.0 is still considered a preview and is experimental. It should not be considered for production use. Users should still expect breaking changes to be possible, though efforts will be taken to avoid such changes. As GD2 is still under heavy development, new features can be expected throughout the 4.1 release.

+
+

GD2 brings initial support for rebalance, snapshots, intelligent volume provisioning and a lot of other bug fixes and internal changes.

+
Rebalance #786
+

GD2 supports running rebalance on volumes. Supported rebalance operations include,

+
    +
  • rebalance start
  • +
  • rebalance start with fix-layout
  • +
  • rebalance stop
  • +
  • rebalance status
  • +
+

Support only exists in the ReST API right now. CLI support will be introduced in subsequent releases.

+
Snapshot #533
+

Initial support for volume snapshot has been introduced. At the moment, snapshots are supported only on Thin-LVM bricks.

+

Supported snapshot operations include,

+
    +
  • create
  • +
  • activate/deactivate
  • +
  • list
  • +
  • info
  • +
+
Intelligent volume provisioning (IVP) #661
+

GD2 brings a very early preview for intelligent volume creation, similar to Heketi.

+
+

IMP: This is considered experimental, and the API and implementation are not final. It is very possible that both the API and the implementation will change.

+
+

IVP enables users to create volumes by just providing the expected volume type and a size, without providing the brick layout. IVP is supported in the CLI via the normal volume create command.

+

More information on IVP can be found in the pull-request.

+

To support IVP, support for adding and managing block devices, and basic support for zones, is available. #783 #785

+
Other changes
+

Other notable changes include,

+
    +
  • Support for volume option levels (experimental, advanced, deprecated) #591
  • +
  • Support for resetting volume options #545
  • +
  • Option hooks for volume set #708
  • +
  • Support for setting quota options #583
  • +
  • Changes to transaction locking #808
  • +
  • Support for setting metadata on peers and volume #600 #689 #704
  • +
  • Thin arbiter support #673 #702
  • +
+

In addition to the above, a lot of smaller bug-fixes and enhancements to internal frameworks and tests have also been done.

+
Known issues
+

GD2 is still under heavy development and has lots of known bugs. For filing new bugs or tracking known bugs, please use the GD2 github issue tracker.

+

2. Changes to gluster based smb.conf share management

+

Previously, Gluster used to delete the entire volume share section from smb.conf either after the volume was stopped or when disabling the user.cifs/user.smb volume set options. With this release, those volume share sections that were added by Samba hook scripts inside smb.conf will no longer be removed after a volume stop or on disabling the user.cifs/user.smb volume set options. Instead, the following share-specific smb.conf parameter is added to the end of the corresponding volume share section to make it unavailable for client access:

+
available = no
+
+

This makes sure that additional smb.conf parameters configured externally are retained. For more details on the above parameter, search under "available (S)" in the smb.conf(5) manual page.
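For illustration only (the exact parameters written by the hook scripts may differ between versions), a volume share section could end up looking roughly like this after a volume stop:

[gluster-<volname>]
comment = For samba share of volume <volname>
vfs objects = glusterfs
glusterfs:volume = <volname>
path = /
read only = no
guest ok = yes
available = no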

+

Monitoring

+

Various xlators are enhanced to provide additional metrics that help in determining the effectiveness of the xlator in various workloads.

These metrics can be dumped and visualized as detailed here.
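As a rough sketch, based on the monitoring support introduced in the 4.0 series (paths and behaviour may vary by build), a metrics dump is typically triggered by sending SIGUSR2 to a Gluster process, with the output written under /var/run/gluster/metrics/:

# kill -USR2 <pid-of-gluster-process>
# ls /var/run/gluster/metrics/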

+

1. Additional metrics added to negative lookup cache xlator

+

Metrics added are:

+
    +
  • negative_lookup_hit_count
  • +
  • negative_lookup_miss_count
  • +
  • get_real_filename_hit_count
  • +
  • get_real_filename_miss_count
  • +
  • nameless_lookup_count
  • +
  • inodes_with_positive_dentry_cache
  • +
  • inodes_with_negative_dentry_cache
  • +
  • dentry_invalidations_recieved
  • +
  • cache_limit
  • +
  • consumed_cache_size
  • +
  • inode_limit
  • +
  • consumed_inodes
  • +
+

2. Additional metrics added to md-cache xlator

+

Metrics added are:

+
    +
  • stat_cache_hit_count
  • +
  • stat_cache_miss_count
  • +
  • xattr_cache_hit_count
  • +
  • xattr_cache_miss_count
  • +
  • nameless_lookup_count
  • +
  • negative_lookup_count
  • +
  • stat_cache_invalidations_received
  • +
  • xattr_cache_invalidations_received
  • +
+

3. Additional metrics added to quick-read xlator

+

Metrics added are:

+
    +
  • total_files_cached
  • +
  • total_cache_used
  • +
  • cache-hit
  • +
  • cache-miss
  • +
  • cache-invalidations
  • +
+

Performance

+

1. Support for fuse writeback cache

+

Gluster FUSE mounts support a FUSE extension to leverage the kernel "writeback cache".

For usage help see man 8 glusterfs and man 8 mount.glusterfs, specifically the options -kernel-writeback-cache and -attr-times-granularity.
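A hedged illustration, assuming the mount helper accepts these as -o options in the same way it does other glusterfs options (check the man pages above for the exact option names and accepted values):

# mount -t glusterfs -o kernel-writeback-cache=yes,attr-times-granularity=<nanoseconds> <server>:<volname> <mntpoint>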

+

2. Extended eager-lock to metadata transactions in replicate xlator

+

The eager lock feature in the replicate xlator is extended to support metadata transactions in addition to data transactions. This helps in improving performance when there are frequent metadata updates in the workload. This is typically seen with sharded volumes by default, and in other workloads that incur a higher rate of metadata modifications to the same set of files.

As a part of this feature, the compound FOPs feature in AFR is deprecated; volumes that are configured to leverage compounding will start disregarding the option use-compound-fops.

NOTE: This is an internal change in the AFR xlator and is not user controlled or configurable.

+

3. Support for multi-threaded fuse readers

+

FUSE based mounts can specify the number of FUSE request processing threads during a mount. For workloads that have high concurrency on a single client, this helps in processing FUSE requests in parallel, compared to the existing single reader model.

This is provided as a mount time option named reader-thread-count and can be used as follows,

+
# mount -t glusterfs -o reader-thread-count=<n> <server>:<volname> <mntpoint>
+
+

4. Configurable aggregate size for write-behind xlator

+

The write-behind xlator provides the option performance.aggregate-size to enable configurable aggregate write sizes. This option enables the write-behind xlator to aggregate writes until the specified value before the writes are sent to the bricks.

The existing behaviour set this size to a maximum of 128KB per file. The configurable option provides the ability to tune this up or down based on the workload, to improve write performance.

+

Usage:

+
# gluster volume set <volname> performance.aggregate-size <size>
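For instance, to raise the aggregation threshold to an illustrative 256KB (whether a larger or smaller value helps depends entirely on the workload):

# gluster volume set <volname> performance.aggregate-size 256KB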
+
+

5. Adaptive read replica selection based on queue length

+

The AFR xlator is enhanced with a newer value for the option read-hash-mode. Setting this option to 3 will distribute reads across AFR subvolumes based on the subvolume having the least outstanding read requests.

This helps in better distributing reads, and hence improves read performance, on replicate based volumes.
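A minimal sketch, assuming the option is exposed under the usual cluster namespace as cluster.read-hash-mode:

# gluster volume set <volname> cluster.read-hash-mode 3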

+

Standalone

+

1. Thin arbiter quorum for 2-way replication

+

NOTE: This feature is available only with GlusterD2

+

Documentation for the feature is provided here.

+

2. Automatically configure backup volfile servers in clients

+

NOTE: This feature is available only with GlusterD2

+

Clients connecting to and mounting a Gluster volume will automatically fetch and configure backup volfile servers, for future volfile updates and fetches, in case the initial server used to fetch the volfile and mount goes down.

When using glusterd, this is achieved using the FUSE mount option backup-volfile-servers, and when using GlusterD2 this is done automatically.
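With glusterd, a hedged example of specifying the fallback servers at mount time looks like this (server names are placeholders):

# mount -t glusterfs -o backup-volfile-servers=<server2>:<server3> <server1>:<volname> <mntpoint>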

+

3. (c/m)time equivalence across replicate and disperse subvolumes

+

Enabling the utime feature enables Gluster to maintain consistent change and modification time stamps on files and directories across bricks.

This feature is useful when applications are sensitive to time deltas between operations (for example, tar may report "file changed as we read it"), as it maintains and reports equal time stamps on the file across the subvolumes.

+

To enable the feature use,

+
# gluster volume set <volname> features.utime
+
+

Limitations:

+
    +
  • Mounting a gluster volume with time attribute options (noatime, relatime, ...) is not supported with this feature
  • Certain entry operations (with differing creation flags) would reflect an eventual consistency w.r.t the time attributes
  • This feature does not guarantee consistent time for directories if the hashed sub-volume for the directory is down
  • readdirp (or directory listing) is not supported with this feature
+ +

1. New API for acquiring leases and acting on lease recalls

+

A new API to acquire a lease on an open file, and to receive callbacks when the lease is recalled, is provided with gfapi.

+

Refer to the header for details on how to use this API.

+

2. Extended language bindings for gfapi to include perl

+

See libgfapi-perl - Libgfapi bindings for Perl using FFI.

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-4.0.0 are listed below.

+
    +
  • #1074947: add option to build rpm without server
  • +
  • #1234873: glusterfs-resource-agents - volume - voldir is not properly set
  • +
  • #1272030: Remove lock recovery logic from client and server protocol translators
  • +
  • #1304962: Intermittent file creation fail,while doing concurrent writes on distributed volume has more than 40 bricks
  • +
  • #1312830: tests fail because bug-924726.t depends on netstat
  • +
  • #1319992: RFE: Lease support for gluster
  • +
  • #1450546: Paths to some tools are hardcoded to /sbin or /usr/sbin
  • +
  • #1450593: Gluster Python scripts do not check return value of find_library
  • +
  • #1468483: Sharding sends all application sent fsyncs to the main shard file
  • +
  • #1495153: xlator_t structure's 'client_latency' variable is not used
  • +
  • #1500649: Shellcheck errors in hook scripts
  • +
  • #1505355: quota: directories doesn't get heal on newly added bricks when quota is full on sub-directory
  • +
  • #1506140: Add quorum checks in post-op
  • +
  • #1507230: Man pages badly formatted
  • +
  • #1512691: PostgreSQL DB Restore: unexpected data beyond EOF
  • +
  • #1517260: Volume wrong size
  • +
  • #1521030: rpc: unregister programs before registering them again
  • +
  • #1523122: fix serval bugs found on testing protocol/client
  • +
  • #1523219: fuse xlator uses block size and fragment size 128KB leading to rounding off in df output
  • +
  • #1530905: Reducing regression time of glusterd test cases
  • +
  • #1533342: Syntactical errors in hook scripts for managing SELinux context on bricks
  • +
  • #1536024: Rebalance process is behaving differently for AFR and EC volume.
  • +
  • #1536186: build: glibc has removed legacy rpc headers and rpcgen in Fedora28, use libtirpc
  • +
  • #1537362: glustershd/glusterd is not using right port when connecting to glusterfsd process
  • +
  • #1537364: [RFE] - get-state option should mark profiling enabled flag at volume level
  • +
  • #1537457: DHT log messages: Found anomalies in (null) (gfid = 00000000-0000-0000-0000-000000000000). Holes=1 overlaps=0
  • +
  • #1537602: Georeplication tests intermittently fail
  • +
  • #1538258: build: python-ctypes only in RHEL <= 7
  • +
  • #1538427: Seeing timer errors in the rebalance logs
  • +
  • #1539023: Add ability to control verbosity settings while compiling
  • +
  • #1539166: [bitrot] scrub ondemand reports it's start as success without additional detail
  • +
  • #1539358: Changes to self-heal logic w.r.t. detecting of split-brains
  • +
  • #1539510: Optimize glusterd_import_friend_volume code path
  • +
  • #1539545: gsyncd is running gluster command to get config file path is not required
  • +
  • #1539603: Glusterfs crash when doing statedump with memory accounting is disabled
  • +
  • #1540338: Change op-version of master to 4.1.0 for future options that maybe added
  • +
  • #1540607: glusterd fails to attach brick during restart of the node
  • +
  • #1540669: Do lock conflict check correctly for wait-list
  • +
  • #1541038: A down brick is incorrectly considered to be online and makes the volume to be started without any brick available
  • +
  • #1541264: dht_layout_t leak in dht_populate_inode_for_dentry
  • +
  • #1541916: The used space in the volume increases when the volume is expanded
  • +
  • #1542318: dht_lookup_unlink_of_false_linkto_cbk fails with "Permission denied"
  • +
  • #1542829: Too many log messages about dictionary and options
  • +
  • #1543279: Moving multiple temporary files to the same destination concurrently causes ESTALE error
  • +
  • #1544090: possible memleak in glusterfsd process with brick multiplexing on
  • +
  • #1544600: 3.8 -> 3.10 rolling upgrade fails (same for 3.12 or 3.13) on Ubuntu 14
  • +
  • #1544699: Rolling upgrade to 4.0 is broken
  • +
  • #1544961: libgfrpc does not export IPv6 RPC methods even with --with-ipv6-default
  • +
  • #1545048: [brick-mux] process termination race while killing glusterfsd on last brick detach
  • +
  • #1545056: [CIOT] : Gluster CLI says "io-threads : enabled" on existing volumes post upgrade.
  • +
  • #1545891: Provide a automated way to update bugzilla status with patch merge.
  • +
  • #1546129: Geo-rep: glibc fix breaks geo-replication
  • +
  • #1546620: DHT calls dht_lookup_everywhere for 1xn volumes
  • +
  • #1546954: [Rebalance] "Migrate file failed: : failed to get xattr [No data available]" warnings in rebalance logs
  • +
  • #1547068: Bricks getting assigned to different pids depending on whether brick path is IP or hostname based
  • +
  • #1547128: Typo error in __dht_check_free_space function log message
  • +
  • #1547662: After a replace brick command, self-heal takes some time to start healing files on disperse volumes
  • +
  • #1547888: [brick-mux] incorrect event-thread scaling in server_reconfigure()
  • +
  • #1548361: Make afr_fsync a transaction
  • +
  • #1549000: line-coverage tests not capturing details properly.
  • +
  • #1549606: Eager lock should be present for both metadata and data transactions
  • +
  • #1549915: [Fuse Sub-dir] After performing add-brick on volume,doing rm -rf * on subdir mount point fails with "Transport endpoint is not connected"
  • +
  • #1550078: memory leak in pre-op in replicate volumes for every write
  • +
  • #1550339: glusterd leaks memory when vol status is issued
  • +
  • #1550895: GD2 fails to dlopen server xlator
  • +
  • #1550936: Pause/Resume of geo-replication with wrong user specified returns success
  • +
  • #1553129: Memory corruption is causing crashes, hangs and invalid answers
  • +
  • #1553598: [Rebalance] ENOSPC errors on few files in rebalance logs
  • +
  • #1553926: configure --without-ipv6-default has odd behaviour
  • +
  • #1553938: configure summary TIRPC result is misleading
  • +
  • #1554053: 4.0 clients may fail to convert iatt in dict when recieving the same from older (< 4.0) servers
  • +
  • #1554743: [EC] Read performance of EC volume exported over gNFS is significantly lower than write performance
  • +
  • #1555154: glusterd: TLS verification fails when using intermediate CA instead of self-signed certificates
  • +
  • #1555167: namespace test failure
  • +
  • #1557435: Enable lookup-optimize by default
  • +
  • #1557876: Fuse mount crashed with only one VM running with its image on that volume
  • +
  • #1557932: Shard replicate volumes don't use eager-lock affectively
  • +
  • #1558016: test ./tests/bugs/ec/bug-1236065.t is generating crash on build
  • +
  • #1558074: [disperse] Add tests for in-memory stripe cache for the non aligned write
  • +
  • #1558380: Modify glfsheal binary to accept socket file path as an optional argument.
  • +
  • #1559004: /var/log/glusterfs/bricks/export_vdb.log flooded with this error message "Not able to add to index [Too many links]"
  • +
  • #1559075: enable ownthread feature for glusterfs4_0_fop_prog
  • +
  • #1559126: Incorrect error message in /features/changelog/lib/src/gf-history-changelog.c
  • +
  • #1559130: ssh stderr in glusterfind gets swallowed
  • +
  • #1559235: Increase the inode table size on server when upcall enabled
  • +
  • #1560319: NFS client gets "Invalid argument" when writing file through nfs-ganesha with quota
  • +
  • #1560393: Fix regresssion failure for ./tests/basic/md-cache/bug-1418249.t
  • +
  • #1560411: fallocate created data set is crossing storage reserve space limits resulting 100% brick full
  • +
  • #1560441: volume stop in mgmt v3
  • +
  • #1560589: nl-cache.t fails
  • +
  • #1560957: After performing remove-brick followed by add-brick operation, brick went offline state
  • +
  • #1561129: When storage reserve limit is reached, appending data to an existing file throws EROFS error
  • +
  • #1561406: Rebalance failures on a dispersed volume with lookup-optimize enabled
  • +
  • #1562052: build: revert configure --without-ipv6-default behaviour
  • +
  • #1562717: SHD is not healing entries in halo replication
  • +
  • #1562907: set mgmt_v3_timer->timer to NULL after mgmt_v3_timer is deleted
  • +
  • #1563273: mark brick as online only when portmap registration is completed
  • +
  • #1563334: Honour cluster.localtime-logging option for all the daemons
  • +
  • #1563511: Redundant synchronization in rename codepath for a single subvolume DHT
  • +
  • #1563945: [EC] Turn ON the stripe-cache option by default for ec volume
  • +
  • #1564198: [Remove-brick] Many files were not migrated from the decommissioned bricks; commit results in data loss
  • +
  • #1564235: gfapi: fix a couple of minor issues
  • +
  • #1564600: Client can create denial of service (DOS) conditions on server
  • +
  • #1566067: Volume status inode is broken with brickmux
  • +
  • #1566207: Linux kernel untar failed with "xz: (stdin): Read error: Invalid argument" immediate after add-brick
  • +
  • #1566303: Removing directories from multiple clients throws ESTALE errors
  • +
  • #1566386: Disable choose-local in groups virt and gluster-block
  • +
  • #1566732: EIO errors on some operations when volume has mixed brick versions on a disperse volume
  • +
  • #1567209: Geo-rep: faulty session due to OSError: [Errno 95] Operation not supported
  • +
  • #1567880: Grant Deepshikha access to all CI-related infrastructure
  • +
  • #1567881: Halo replication I/O path is not working
  • +
  • #1568348: Rebalance on few nodes doesn't seem to complete - stuck at FUTEX_WAIT
  • +
  • #1568521: shard files present even after deleting vm from ovirt UI
  • +
  • #1568820: Add generated HMAC token in header for webhook calls
  • +
  • #1568844: [snapshot-scheduler]Prevent access of shared storage volume from the outside client
  • +
  • #1569198: bitrot scrub status does not show the brick where the object (file) is corrupted
  • +
  • #1569489: Need heal-timeout to be configured as low as 5 seconds
  • +
  • #1570011: test case is failing ./tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t while brick mux is enabled
  • +
  • #1570538: linux untar errors out at completion during disperse volume inservice upgrade
  • +
  • #1570962: print the path of the corrupted object in scrub status
  • +
  • #1571069: [geo-rep]: Lot of changelogs retries and "dict is null" errors in geo-rep logs
  • +
  • #1572076: Dictionary response is not captured in syncop_(f)xattrop
  • +
  • #1572581: Remove-brick failed on Distributed volume while rm -rf is in-progress
  • +
  • #1572586: dht: do not allow migration if file is open
  • +
  • #1573066: growing glusterd memory usage with connected RHGSWA
  • +
  • #1573119: Amends in volume profile option 'gluster-block'
  • +
  • #1573220: Memory leak in volume tier status command
  • +
  • #1574259: Errors unintentionally reported for snapshot status
  • +
  • #1574305: rm command hangs in fuse_request_send
  • +
  • #1574606: the regression test "tests/bugs/posix/bug-990028.t" fails
  • +
  • #1575294: lease recall callback should be avoided on closed
  • +
  • #1575386: GlusterFS 4.1.0 tracker
  • +
  • #1575707: Gluster volume smb share options are getting overwritten after restating the gluster volume
  • +
  • #1576814: GlusterFS can be improved
  • +
  • #1577162: gfapi: broken symbol versions
  • +
  • #1579674: Remove EIO from the dht_inode_missing macro
  • +
  • #1579736: Additional log messages in dht_readdir(p)_cbk
  • +
  • #1579757: DHT Log flooding in mount log "key=trusted.glusterfs.dht.mds [Invalid argument]"
  • +
  • #1580215: [geo-rep]: Lot of changelogs retries and "dict is null" errors in geo-rep logs
  • +
  • #1580540: make getfattr return proper response for "glusterfs.gfidtopath" xattr for files created when gfid2path was off
  • +
  • #1581548: writes succeed when only good brick is down in 1x3 volume
  • +
  • #1581745: bug-1309462.t is failing reliably due to changes in security.capability changes in the kernel
  • +
  • #1582056: Input/Output errors on a disperse volume with concurrent reads and writes
  • +
  • #1582063: rpc: The gluster auth version is always AUTH_GLUSTERFS_v2
  • +
  • #1582068: ctime: Rename and unlink does not update ctime
  • +
  • #1582072: posix/ctime: Access time is not updated for file with a hardlink
  • +
  • #1582080: posix/ctime: The first lookup on file is not healing the gfid
  • +
  • #1582199: posix unwinds readdirp calls with readdir signature
  • +
  • #1582286: Brick-mux regressions failing on 4.1 branch
  • +
  • #1582531: posix/ctime: Mtime is not updated on setting it to older date
  • +
  • #1582549: api: missing __THROW on pub function decls
  • +
  • #1583016: libgfapi: glfs init fails on afr volume with ctime feature enabled
  • +
  • #1583734: rpc_transport_unref() called for an unregistered socket fd
  • +
  • #1583769: Fix incorrect rebalance log message
  • +
  • #1584633: Brick process crashed after upgrade from RHGS-3.3.1 async(7.4) to RHGS-3.4(7.5)
  • +
  • #1585894: posix/ctime: EC self heal of directory is blocked with ctime feature enabled
  • +
  • #1587908: Fix deadlock in failure codepath of shard fsync
  • +
  • #1590128: xdata is leaking in server3_3_seek

Release notes for Gluster 4.1.1

+

This is a bugfix release. The release notes for 4.1.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

+

Major changes, features and limitations addressed in this release

+

This release contains a fix for a security vulnerability in Gluster as follows,

+
    +
  • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10841
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-10841
  • +
+

Installing the updated packages and restarting gluster services on gluster brick hosts will help prevent the security issue.

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-4.1.0 are listed below.

+
    +
  • #1590195: /usr/sbin/gcron.py aborts with OSError
  • +
  • #1591185: Gluster Block PVC fails to mount on Jenkins pod
  • +
  • #1593525: CVE-2018-10841 glusterfs: access trusted peer group via remote-host command [glusterfs upstream]

Release notes for Gluster 4.1.10

+

This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4, 4.1.5, 4.1.6, 4.1.7, 4.1.8 and 4.1.9 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-4.1.9 are listed below.

+
    +
  • #1721109: Failed to create volume which transport_type is "tcp,rdma"
  • +
  • #1729221: Upcall: Avoid sending upcalls for invalid Inode
  • +
  • #1729223: Ganesha-gfapi logs are flooded with error messages related to "gf_uuid_is_null(gfid)) [Invalid argument]" when lookups are running from multiple clients

Release notes for Gluster 4.1.2

+

This is a bugfix release. The release notes for 4.1.0 and 4.1.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

+

Major changes, features and limitations addressed in this release

+
    +
  1. The release 4.1.0 notes incorrectly reported that all python code in Gluster packages is python3 compliant; this is not the case, and the release note is amended accordingly.
  2. +
+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-4.1.1 are listed below.

+
    +
  • #1593536: ctime: Self heal of symlink is failing on EC subvolume
  • +
  • #1593537: posix/ctime: Mdata value of a directory is different across replica/EC subvolume
  • +
  • #1595524: rmdir is leaking softlinks to directories in .glusterfs
  • +
  • #1597116: afr: don't update readables if inode refresh failed on all children
  • +
  • #1597117: lookup not assigning gfid if file is not present in all bricks of replica
  • +
  • #1597229: glustershd crashes when index heal is launched before graph is initialized.
  • +
  • #1598193: Stale lock with lk-owner all-zeros is observed in some tests
  • +
  • #1599629: Don't execute statements after decrementing call count in afr
  • +
  • #1599785: _is_prefix should return false for 0-length strings
  • +
  • #1600941: [geo-rep]: geo-replication scheduler is failing due to unsuccessful umount
  • +
  • #1603056: When reserve limits are reached, append on an existing file after truncate operation results to hang
  • +
  • #1603099: directories are invisible on client side

Release notes for Gluster 4.1.3

+

This is a bugfix release. The release notes for 4.1.0, 4.1.1, and 4.1.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

+

NOTE: Next minor release tentative date: Week of 24th September, 2018

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+
    +
  1. Bug #1601356, titled "Problem with SSL/TLS encryption", is not yet fixed with this release. A patch to fix it is in progress and can be tracked here.
  2. +
+

Bugs addressed

+

Bugs addressed since release-4.1.2 are listed below.

+
    +
  • #1425326: gluster bash completion leaks TOP=0 into the environment
  • +
  • #1596686: key = trusted.glusterfs.protect.writes [Invalid argument]; key = glusterfs.avoid.overwrite [Invalid argument]
  • +
  • #1609550: glusterfs-resource-agents should not be built for el6
  • +
  • #1609551: glusterfs-resource-agents should not be built for el6
  • +
  • #1611104: [geo-rep]: Upgrade fails, session in FAULTY state
  • +
  • #1611106: Glusterd crashed on a few (master) nodes
  • +
  • #1611108: [geo-rep]: Geo-rep scheduler fails
  • +
  • #1611110: Glusterd memory leaking in gf_gld_mt_linebuf
  • +
  • #1611111: [geo-rep]: Geo-replication in FAULTY state - CENTOS 6
  • +
  • #1611113: [geo-rep]: Geo-replication not syncing renamed symlink
  • +
  • #1611114: [geo-rep]: [Errno 2] No such file or directory
  • +
  • #1611115: avoid possible glusterd crash in glusterd_verify_slave
  • +
  • #1611116: 'custom extended attributes' set on a directory are not healed after bringing back the down sub-volumes
  • +
  • #1618347: [Ganesha] Ganesha crashed in mdcache_alloc_and_check_handle while running bonnie and untars with parallel lookups

Release notes for Gluster 4.1.4

+

This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2 and 4.1.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

+

Major changes, features and limitations addressed in this release

+
    +
  1. +

    This release contains fixes for the following security vulnerabilities,

    +
      +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10904
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10907
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10911
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10913
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10914
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10923
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10926
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10927
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10928
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10929
    • +
    • https://nvd.nist.gov/vuln/detail/CVE-2018-10930
    • +
    +
  2. +
  3. +

    To resolve the security vulnerabilities, the following limitations were introduced in GlusterFS:

    +
      +
    • open, read and write on special files like char and block devices are no longer permitted
    • the io-stats xlator can dump stat info only to the /var/run/gluster directory
    • +
    +
  4. +
+

Installing the updated packages and restarting gluster services on gluster brick hosts will fix the security issues.

+

Major issues

+
    +
  1. Bug #1601356, titled "Problem with SSL/TLS encryption", is not yet fixed with this release. A patch to fix it is in progress and can be tracked here.
  2. +
+

Bugs addressed

+

Bugs addressed since release-4.1.3 are listed below.

+
    +
  • #1625089: Improper deserialization in dict.c:dict_unserialize() can allow attackers to read arbitrary memory
  • +
  • #1625095: Files can be renamed outside volume
  • +
  • #1625096: I/O to arbitrary devices on storage server
  • +
  • #1625097: Stack-based buffer overflow in server-rpc-fops.c allows remote attackers to execute arbitrary code
  • +
  • #1625102: Information Exposure in posix_get_file_contents function in posix-helpers.c
  • +
  • #1625106: Unsanitized file names in debug/io-stats translator can allow remote attackers to execute arbitrary code

Release notes for Gluster 4.1.5

+

This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3 and 4.1.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

+

NOTE: Next minor release tentative date: Week of 19th November, 2018

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-4.1.4 are listed below.

+
    +
  • #1601356: Problem with SSL/TLS encryption on Gluster 4.0 & 4.1
  • +
  • #1625575: Prevent hangs while increasing replica-count/replace-brick for directory hierarchy
  • +
  • #1629548: Excessive logging in posix_set_parent_ctime()
  • +
  • #1630140: geo-rep: geo-rep config set fails to set rsync-options
  • +
  • #1630141: libgfchangelog: History API fails
  • +
  • #1630144: Geo-rep: Geo-rep regression times out occasionally
  • +
  • #1630145: Geo-rep: Few workers fails to start with out any failure

Release notes for Gluster 4.1.6

+

This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4 and 4.1.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

+

NOTE: Next minor release tentative date: Week of 20th January, 2019

+

Major changes, features and limitations addressed in this release

+

This release contains fixes for several security vulnerabilities in Gluster as follows,

+
    +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14651
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14652
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14653
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14654
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14659
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14660
  • +
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14661
  • +
+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-4.1.5 are listed below.

+
    +
  • #1632013: georep: hard-coded paths in gsyncd.conf.in
  • +
  • #1633479: 'df' shows half as much space on volume after upgrade to RHGS 3.4
  • +
  • #1633634: split-brain observed on parent dir
  • +
  • #1635979: Writes taking very long time leading to system hogging
  • +
  • #1635980: Low Random write IOPS in VM workloads
  • +
  • #1636218: [SNAPSHOT]: with brick multiplexing, snapshot restore will make glusterd send wrong volfile
  • +
  • #1637953: data-self-heal in arbiter volume results in stale locks.
  • +
  • #1641761: Spurious failures in bug-1637802-arbiter-stale-data-heal-lock.t
  • +
  • #1643052: Seeing defunt translator and discrepancy in volume info when issued from node which doesn't host bricks in that volume
  • +
  • #1643075: tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t failing
  • +
  • #1643929: geo-rep: gluster-mountbroker status crashes
  • +
  • #1644163: geo-rep: geo-replication gets stuck after file rename and gfid conflict
  • +
  • #1644474: afr/lease: Read child nodes from lease structure
  • +
  • #1644516: geo-rep: gluster-mountbroker status crashes
  • +
  • #1644518: [Geo-Replication] Geo-rep faulty sesion because of the directories are not synced to slave.
  • +
  • #1644524: Excessive logging in posix_update_utime_in_mdata
  • +
  • #1645363: CVE-2018-14652 glusterfs: Buffer overflow in "features/locks" translator allows for denial of service [fedora-all]
  • +
  • #1646200: CVE-2018-14654 glusterfs: "features/index" translator can create arbitrary, empty files [fedora-all]
  • +
  • #1646806: [Geo-rep]: Faulty geo-rep sessions due to link ownership on slave volume
  • +
  • #1647667: CVE-2018-14651 glusterfs: glusterfs server exploitable via symlinks to relative paths [fedora-all]
  • +
  • #1647668: CVE-2018-14661 glusterfs: features/locks translator passes an user-controlled string to snprintf without a proper format string resulting in a denial of service [fedora-all]
  • +
  • #1647669: CVE-2018-14659 glusterfs: Unlimited file creation via "GF_XATTR_IOSTATS_DUMP_KEY" xattr allows for denial of service [fedora-all]
  • +
  • #1647670: CVE-2018-14653 glusterfs: Heap-based buffer overflow via "gf_getspec_req" RPC message [fedora-all]
  • +
  • #1647972: CVE-2018-14660 glusterfs: Repeat use of "GF_META_LOCK_KEY" xattr allows for memory exhaustion [fedora-all]
  • +
  • #1648367: crash seen while running regression, intermittently.
  • +
  • #1648938: gfapi: fix bad dict setting of lease-id
  • +
  • #1648982: packaging: don't include bd.so in rpm when --without bd

Release notes for Gluster 4.1.7

+

This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4, 4.1.5 and 4.1.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

+

NOTE: Next minor release tentative date: Week of 20th March, 2019

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-4.1.6 are listed below.

+
    +
  • #1654118: [geo-rep]: Failover / Failback shows fault status in a non-root setup
  • #1654229: Provide an option to silence glfsheal logs
  • #1655527: Incorrect usage of local->fd in afr_open_ftruncate_cbk
  • #1655532: Tracker bug for all leases related issues
  • #1655561: gfid heal does not happen when there is no source brick
  • #1662635: Fix tests/bugs/shard/zero-flag.t
  • #1663132: [Ganesha] Ganesha failed on one node while exporting volumes in loop

diff --git a/release-notes/4.1.8/index.html b/release-notes/4.1.8/index.html
new file mode 100644

Release notes for Gluster 4.1.8

This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4, 4.1.5, 4.1.6 and 4.1.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

NOTE: Next minor release tentative date: Week of 20th May, 2019

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-4.1.7 are listed below.

  • #1670303: api: bad GFAPI_4.1.6 block
  • #1672249: quorum count value not updated in nfs-server vol file
  • #1673265: Fix timeouts so the tests pass on AWS
  • #1687746: [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
  • #1691292: glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
  • #1693057: dht_revalidate may not heal attrs on the brick root
  • #1693201: core: move "dict is NULL" logs to DEBUG log level

diff --git a/release-notes/4.1.9/index.html b/release-notes/4.1.9/index.html
new file mode 100644

Release notes for Gluster 4.1.9

This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4, 4.1.5, 4.1.6, 4.1.7 and 4.1.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-4.1.8 are listed below.

  • #1660225: geo-rep does not replicate mv or rename of file
  • #1684404: Multiple shd processes are running on brick_mux environmet
  • #1694563: gfapi: do not block epoll thread for upcall notifications
  • #1696513: Multiple shd processes are running on brick_mux environmet
  • #1707200: VM stuck in a shutdown because of a pending fuse request

diff --git a/release-notes/5.0/index.html b/release-notes/5.0/index.html
new file mode 100644

Release notes for Gluster 5.0

This is a major release that includes a range of code improvements and stability fixes along with a few features as noted below.

A selection of the key features and changes is documented on this page. A full list of bugs that have been addressed is included further below.

Announcements

  1. Releases that receive maintenance updates post release 5 are, 4.1
     (reference)

NOTE: The 3.12 long term maintenance release will reach end of life (EOL) with
the release of 5.0. (reference)

  2. Release 5 will receive maintenance updates around the 10th of every month
     for the first 3 months post release (i.e. Nov'18, Dec'18, Jan'19). Post the
     initial 3 months, it will receive maintenance updates every 2 months till EOL.
     (reference)

Major changes and features

+

Features are categorized into the following sections,

+ +

Management

+

GlusterD2

+
+

IMP: GlusterD2 in Gluster-5 is still considered a preview and is +experimental. It should not be considered ready for production use. Users +should still expect some breaking changes even though all efforts would be +taken to ensure that these can be avoided. As GD2 is still under heavy +development, new features can be expected throughout the Gluster 5 release.

+
+

The following major changes have been committed to GlusterD2 since v4.1.0.

  1. Volume snapshots: Most snapshot operations are available including create,
     delete, activate, deactivate, clone and restore.

  2. Volume heal: Support for full heal and index heal for replicate volumes has
     been implemented.

  3. Tracing with Opencensus: Support for tracing distributed operations has been
     implemented in GD2, using the Opencensus API. Tracing instrumentation has been
     done for volume create, list and delete operations. Other operations will
     follow subsequently.

  4. Portmap refactoring: Portmap in GlusterD2 no longer selects a port for the
     bricks to listen on, instead leaving the choice up to the bricks. Portmap only
     saves the port information provided by a brick during signin.

  5. Smartvol API merged with volume create API: The smart volume API, which allows
     a user to create a volume by just specifying a size, has been merged with the
     normal volume create API.

  6. Configure GlusterD2 with environment variables: In addition to CLI flags and
     the config file, GD2 configuration options can be set using environment
     variables.

In addition to the above, many changes have been merged for minor bug-fixes and +to help with testing.

+

Refer to the user documentation section for details on how to get started with +GlusterD2.

+

Standalone

+

1. Entry creation and handling consistency is improved

+

The dentry serializer feature was introduced in Gluster 4.0, to strengthen the +consistency handling of entry operations in the Gluster stack. Entry operations +refer to creating, linking, renaming and unlinking of files and directory names +into the filesystem space.

+

When this feature was first introduced (in 4.0) it was optional; with this release it is enabled by default.

+

2. Python code in Gluster packages is Python 3 ready

+

3. Quota fsck script to correct quota accounting

+

See usage documentation here

+

4. Added noatime option in utime xlator

+

Enabling the utime and ctime feature, enables Gluster to maintain consistent +change and modification time stamps on files and directories across bricks.

+

The utime xlator is enhanced with a noatime option, which is set to enabled by default when the utime feature is enabled. This helps ignore atime updates for operations that would otherwise trigger an atime update on the file system objects.

+

To enable the feature use,

+
# gluster volume set <volname> features.utime on
# gluster volume set <volname> features.ctime on
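
The resulting values can be verified afterwards with gluster volume get; this is only a quick sketch, with <volname> again standing in for the actual volume name:

# gluster volume get <volname> features.utime
# gluster volume get <volname> features.ctime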

5. Added ctime-invalidation option in quick-read xlator

+

Quick-read xlator by default uses mtime (a file's last modification time) to identify changes to file data. However, there are applications, like rsync, which explicitly set mtime, making it unreliable for the purpose of identifying changes to the file content.

+

Since ctime (a file's last status change time) also changes when the content of a file changes, and cannot be set explicitly by applications, it becomes a more reliable source for identifying staleness of cached data.

+

The ctime-invalidation option makes quick-read to prefer ctime over mtime to +validate staleness of its cache.

+

To enable this option use,

+
# gluster volume set <volname> ctime-invalidation on

NOTE: Using ctime can result in false positives as ctime is updated even on +attribute changes, like mode bits, without changes to file data. As a result +this option is recommended in situations where mtime is not reliable.

+

6. Added shard-deletion-rate option in shard xlator

+

The shard-deletion-rate option is introduced, to configure the number of +shards to delete in parallel when a file that is sharded is deleted.

+

The default value is set at 100, but can be increased to delete more shards in +parallel for faster space reclamation.

+

To change the defaults for this option use,

+
# gluster volume set <volname> shard-deletion-rate <n>

NOTE: The upper limit is unbounded, use it with caution as a very large +number will cause lock contention on the bricks. As an example, during testing, +an upper limit of 125000 was enough to cause timeouts and hangs in the gluster +processes due to lock contention.

+

7. Removed last usage of MD5 digest in code, towards better FIPS compliance

+

In an effort to ensure that Gluster can be installed and deployed on machines +that are compliant with the requirements for FIPS, remaining uses of MD5 digest +is removed from the code base.

+

Addressing this feature's requirements was initiated during the 4.0 release, at +which point enabling user space snapshots, which still used MD5 for certain +needs, broke the FIPS compliance requirements. This limitation is now addressed +in this release.

+

8. Code improvements

+

Over the course of this release, the contributors have been active in addressing +various Coverity issues, GCC and clang warnings, clang formatting of the code +base, micro improvements to GLibC API usage and memory handling around +string handling and allocation routines.

+

The above are ongoing efforts, but major strides were made during this release +to actively address code quality in these areas.

+

Major issues

  1. The following options are removed from the code base and require to be unset
     before an upgrade from releases older than release 4.1.0,
     • features.lock-heal
     • features.grace-timeout

To check if these options are set use,

+
# gluster volume info

and ensure that the above options are not part of the Options Reconfigured: +section in the output of all volumes in the cluster.
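
For clusters with many volumes, a small shell loop like the sketch below (the loop itself is illustrative and not part of the release notes) can scan every volume for the removed options:

for v in $(gluster volume list); do
    # print any volume that still carries the removed options
    gluster volume info "$v" | grep -E 'features\.(lock-heal|grace-timeout)' && echo "check volume $v"
done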

+

If these are set, then unset them using the following commands,

+
# gluster volume reset <volname> <option>

NOTE: Failure to do the above may result in failure during online upgrades, +and the reset of these options to their defaults needs to be done prior to +upgrading the cluster.

+

Bugs addressed

+

Bugs addressed since release-4.1.0 are listed below.

+
    +
  • #853601: working-directory should be protected from being a brick
  • +
  • #1312832: tests fail because bug-924726.t depends on netstat
  • +
  • #1390050: Elasticsearch get CorruptIndexException errors when running with GlusterFS persistent storage
  • +
  • #1405147: glusterfs (posix-acl xlator layer) checks for "write permission" instead for "file owner" during open() when writing to a file
  • +
  • #1425325: gluster bash completion leaks TOP=0 into the environment
  • +
  • #1437780: don't send lookup in fuse_getattr()
  • +
  • #1455872: [Perf]: 25% regression on sequential reads on EC over SMB3
  • +
  • #1492847: core (named threads): flood of -Wformat-truncation warnings with gcc-7.
  • +
  • #1512691: PostgreSQL DB Restore: unexpected data beyond EOF
  • +
  • #1524323: No need to load ctr xlator if user has not configured tiering
  • +
  • #1526780: ./run-tests-in-vagrant.sh fails because of disabled Gluster/NFS
  • +
  • #1533000: Quota crawl regressed
  • +
  • #1537602: Georeplication tests intermittently fail
  • +
  • #1543279: Moving multiple temporary files to the same destination concurrently causes ESTALE error
  • +
  • #1545048: [brick-mux] process termination race while killing glusterfsd on last brick detach
  • +
  • #1546103: run-tests-in-vagrant.sh should return test status
  • +
  • #1558574: Coverity: Warning for singlton array..
  • +
  • #1558921: Gluster volume smb share options are getting overwritten after restating the gluster volume
  • +
  • #1561332: merge ssl infra with epoll infra
  • +
  • #1564071: directories are invisible on client side
  • +
  • #1564149: Agree upon a coding standard, and automate check for this in smoke
  • +
  • #1564419: Client side memory leak in encryption xlator (crypt.c).
  • +
  • #1568521: shard files present even after deleting vm from ovirt UI
  • +
  • #1569345: Need COMMITMENT from community for GPL Cure.
  • +
  • #1569399: glusterfsd should be able to start without any other arguments than a single volfile.
  • +
  • #1570538: linux untar errors out at completion during disperse volume inservice upgrade
  • +
  • #1570962: print the path of the corrupted object in scrub status
  • +
  • #1574421: Provide a way to get the hashed-subvol for a file
  • +
  • #1575381: gluster volume heal info prints extra newlines
  • +
  • #1575490: [geo-rep]: Upgrade fails, session in FAULTY state
  • +
  • #1575587: Leverage MDS subvol for dht_removexattr also
  • +
  • #1575716: gfapi: broken symbol versions
  • +
  • #1575742: Change op-version of master to 4.2.0 for future options that maybe added
  • +
  • #1575858: quota crawler fails w/ TLS enabled
  • +
  • #1575864: glusterfsd crashing because of RHGS WA?
  • +
  • #1575887: Additional log messages in dht_readdir(p)_cbk
  • +
  • #1575910: DHT Log flooding in mount log "key=trusted.glusterfs.dht.mds [Invalid argument]"
  • +
  • #1576179: [geo-rep]: Geo-rep scheduler fails
  • +
  • #1576392: Glusterd crashed on a few (master) nodes
  • +
  • #1576418: Warning messages generated for the removal of extended attribute security.ima flodding client logs
  • +
  • #1576767: [geo-rep]: Lot of changelogs retries and "dict is null" errors in geo-rep logs
  • +
  • #1576842: cloudsync: make plugins configurable
  • +
  • #1577574: brick crash seen while creating and deleting two volumes in loop
  • +
  • #1577627: [Geo-rep]: Status in ACTIVE/Created state
  • +
  • #1577672: Brick-mux regressions failing for over 8+ weeks on master
  • +
  • #1577731: [Ganesha] "Gluster nfs-ganesha enable" commands sometimes gives output as "failed" with "Unlocking failed" error messages ,even though cluster is up and healthy in backend
  • +
  • #1577744: The tool to generate new xlator template code is not upto date
  • +
  • #1578325: Input/Output errors on a disperse volume with concurrent reads and writes
  • +
  • #1578650: If parallel-readdir is enabled, the readdir-optimize option even when it is set to on it behaves as off
  • +
  • #1578721: Statedump prints memory usage statistics twice
  • +
  • #1578823: Remove EIO from the dht_inode_missing macro
  • +
  • #1579276: rpc: The gluster auth version is always AUTH_GLUSTERFS_v2
  • +
  • #1579769: inode status command is broken with distributed replicated volumes
  • +
  • #1579786: Thin-arbiter: Provide script to start and run thin arbiter process
  • +
  • #1579788: Thin-arbiter: Have the state of volume in memory
  • +
  • #1580020: ctime: Rename and unlink does not update ctime
  • +
  • #1580238: Fix incorrect rebalance log message
  • +
  • #1580269: [Remove-brick+Rename] Failure count shows zero though there are file migration failures
  • +
  • #1580352: Glusterd memory leaking in gf_gld_mt_linebuf
  • +
  • #1580529: posix/ctime: Access time is not updated for file with a hardlink
  • +
  • #1580532: posix/ctime: The first lookup on file is not healing the gfid
  • +
  • #1581035: posix/ctime: Mtime is not updated on setting it to older date
  • +
  • #1581345: posix unwinds readdirp calls with readdir signature
  • +
  • #1581735: bug-1309462.t is failing reliably due to changes in security.capability changes in the kernel
  • +
  • #1582051: Fix failure of readdir-ahead/bug-1439640.t in certain cases
  • +
  • #1582516: libgfapi: glfs init fails on afr volume with ctime feature enabled
  • +
  • #1582704: rpc_transport_unref() called for an unregistered socket fd
  • +
  • #1583018: changelog: Changelog is not capturing rename of files
  • +
  • #1583565: [distribute]: Excessive 'dict is null' errors in geo-rep logs
  • +
  • #1583583: "connecting" state in protocol client is useless
  • +
  • #1583937: Brick process crashed after upgrade from RHGS-3.3.1 async(7.4) to RHGS-3.4(7.5)
  • +
  • #1584098: 'custom extended attributes' set on a directory are not healed after bringing back the down sub-volumes
  • +
  • #1584483: afr: don't update readables if inode refresh failed on all children
  • +
  • #1584517: Inconsistent access permissions on directories after bringing back the down sub-volumes
  • +
  • #1584864: sometime messages
  • +
  • #1584981: posix/ctime: EC self heal of directory is blocked with ctime feature enabled
  • +
  • #1585391: glusteshd wrong status caused by gluterd big lock
  • +
  • #1585585: Cleanup "connected" state management of rpc-clnt
  • +
  • #1586018: (f)Setxattr and (f)removexattr invalidates the stat cache in md-cache
  • +
  • #1586020: [GSS] Pending heals are not getting completed in CNS environment
  • +
  • #1586342: Refactor the distributed test code to make it work for ipv4
  • +
  • #1586363: Refactor rebalance code
  • +
  • #1589253: After creating and starting 601 volumes, self heal daemon went down and seeing continuous warning messages in glusterd log
  • +
  • #1589691: xdata is leaking in server3_3_seek
  • +
  • #1589782: [geo-rep]: Geo-replication in FAULTY state - CENTOS 6
  • +
  • #1589842: [USS] snapview server does not go through the list of all the snapshots for validating a snap
  • +
  • #1590193: /usr/sbin/gcron.py aborts with OSError
  • +
  • #1590385: Refactor dht lookup code
  • +
  • #1590655: Excessive logging in posix_check_internal_writes() due to NULL dict
  • +
  • #1590710: Gluster Block PVC fails to mount on Jenkins pod
  • +
  • #1591193: lookup not assigning gfid if file is not present in all bricks of replica
  • +
  • #1591580: Remove code duplication in protocol/client
  • +
  • #1591621: Arequal checksum mismatch on older mount
  • +
  • #1592141: Null pointer deref in error paths
  • +
  • #1592275: posix/ctime: Mdata value of a directory is different across replica/EC subvolume
  • +
  • #1592509: ctime: Self heal of symlink is failing on EC subvolume
  • +
  • #1593232: CVE-2018-10841 glusterfs: access trusted peer group via remote-host command [glusterfs upstream]
  • +
  • #1593351: mount.glusterfs incorrectly reports "getfattr not found"
  • +
  • #1593548: Stack overflow in readdirp with parallel-readdir enabled
  • +
  • #1593562: Add new peers to Glusto
  • +
  • #1593651: gnfs nfs.register-with-portmap issue with ipv6_default
  • +
  • #1595174: Found an issue on using lock before init in md-cache
  • +
  • #1595190: rmdir is leaking softlinks to directories in .glusterfs
  • +
  • #1595320: gluster wrongly reports bricks online, even when brick path is not available
  • +
  • #1595492: tests: remove tarissue.t from BAD_TEST
  • +
  • #1595726: tests/geo-rep: Add test case for symlink rename
  • +
  • #1596020: Introduce database group profile
  • +
  • #1596513: glustershd crashes when index heal is launched before graph is initialized.
  • +
  • #1596524: 'replica 3 aribiter 1' is not a industry standard way of telling 2-way replicate with arbiter.
  • +
  • #1596789: Update mount-shared-storage.sh to automatically include all enabled glusterfs mounts in fstab
  • +
  • #1597156: Need a simpler way to find if a replica/ec subvolume is up
  • +
  • #1597247: restart all the daemons after all the bricks
  • +
  • #1597473: introduce cluster.daemon-log-level option
  • +
  • #1597512: Remove contrib/ipaddr-py
  • +
  • #1597540: tests/geo-rep: Add test cases for rsnapshot use case
  • +
  • #1597563: [geo-rep+tiering]: Hot and Cold tier brick changelogs report rsync failure
  • +
  • #1597568: Mark brick online after port registration even for brick-mux cases
  • +
  • #1597627: tests/bugs/core/bug-1432542-mpx-restart-crash.t is generated crash
  • +
  • #1597662: Stale entries of snapshots need to be removed from /var/run/gluster/snaps
  • +
  • #1597776: br-state-check.t crashed while brick multiplex is enabled
  • +
  • #1597805: Stale lock with lk-owner all-zeros is observed in some tests
  • +
  • #1598325: Replace the BROKEN_TESTS environment variable value
  • +
  • #1598345: gluster get-state command is crashing glusterd process when geo-replication is configured
  • +
  • #1598390: Remove extras/prot_filter.py
  • +
  • #1598548: Disabling iostats diagnostics.stats-dump-interval (set to 0) does not terminate the dump thread
  • +
  • #1598663: Don't execute statements after decrementing call count in afr
  • +
  • #1598884: [geo-rep]: [Errno 2] No such file or directory
  • +
  • #1598926: Misleading error messages on bricks caused by lseek
  • +
  • #1598977: [geo-rep]: geo-replication scheduler is failing due to unsuccessful umount
  • +
  • #1599219: configure fails complaining absence of libxml2-devel
  • +
  • #1599250: bug-1432542-mpx-restart-crash.t takes a lot of time to complete cleanup
  • +
  • #1599628: To find a compatible brick ignore diagnostics.brick-log-level option while brick mux is enabled
  • +
  • #1599783: _is_prefix should return false for 0-length strings
  • +
  • #1600405: [geo-rep]: Geo-replication not syncing renamed symlink
  • +
  • #1600451: crash on glusterfs_handle_brick_status of the glusterfsd
  • +
  • #1600687: fuse process segfault when use resolve-gids option
  • +
  • #1600812: A new volume set option to for GD2 quota integration
  • +
  • #1600878: crash seen while running regression, intermittently.
  • +
  • #1600963: get the failed test details into gerrit output itself
  • +
  • #1601166: performance.read-ahead causes huge increase in unnecessary network traffic
  • +
  • #1601390: Distributed testing: Fix build environment
  • +
  • #1601423: memory leak in get-state when geo-replication session is configured
  • +
  • #1601683: dht: remove useless argument from dht_iatt_merge
  • +
  • #1602070: [SNAPSHOT] snapshot daemon crashes if a fd from a deleted snapshot is accessed
  • +
  • #1602121: avoid possible glusterd crash in glusterd_verify_slave
  • +
  • #1602236: When reserve limits are reached, append on an existing file after truncate operation results to hang
  • +
  • #1602866: dht: Crash seen in thread dht_dir_attr_heal
  • +
  • #1603063: ./tests/bugs/glusterd/validating-server-quorum.t is generated core
  • +
  • #1605056: [RHHi] Mount hung and not accessible
  • +
  • #1605077: If a node disconnects during volume delete, it assumes deleted volume as a freshly created volume when it is back online
  • +
  • #1607049: Excessive logging in posix_set_parent_ctime()
  • +
  • #1607319: Remove uuid from contrib/
  • +
  • #1607689: Memory leaks on glfs_fini
  • +
  • #1607783: Segmentation fault while using gfapi while getting volume utilization
  • +
  • #1608175: Skip hash checks in dht_readdirp_cbk if dht has a single child subvol.
  • +
  • #1608564: line coverage tests failing consistently over a week
  • +
  • #1608566: line coverage tests: glusterd crash in ./tests/basic/sdfs-sanity.t
  • +
  • #1608568: line coverage tests: bug-1432542-mpx-restart-crash.t times out consistently
  • +
  • #1608684: Change glusto ownership to reflect current reality
  • +
  • #1608991: Remove code duplication in socket
  • +
  • #1609126: Fix mem leak and smoke failure for gcc8 in cloudsync
  • +
  • #1609207: thin arbiter: set notify-contention option to yes
  • +
  • #1609337: Remove argp-standalone from contrib/
  • +
  • #1609551: glusterfs-resource-agents should not be built for el6
  • +
  • #1610236: [Ganesha] Ganesha crashed in mdcache_alloc_and_check_handle while running bonnie and untars with parallel lookups
  • +
  • #1610256: [Ganesha] While performing lookups from two of the clients, "ls" command got failed with "Invalid argument"
  • +
  • #1610405: Geo-rep: Geo-rep regression times out occasionally
  • +
  • #1610726: Fuse mount of volume fails when gluster_shared_storage is enabled
  • +
  • #1611103: online_brick_count check in volume.rc should ignore bitrot and scrubber daemons
  • +
  • #1611566: tests/bitrot: tests/bitrot/bug-1373520.t fails intermittently
  • +
  • #1611692: Mount process crashes on a sharded volume during rename when dst doesn't exist
  • +
  • #1611834: glusterfsd crashes when SEEK_DATA/HOLE is not supported
  • +
  • #1612017: MAINTAINERS: Add Xavier Hernandez as peer for shard xlator
  • +
  • #1612037: Entry will be present even if the gfid link creation inside .glusterfs fails
  • +
  • #1612054: Test case bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t failure
  • +
  • #1612418: Brick not coming up on a volume after rebooting the node
  • +
  • #1612750: gfapi: Use inode_forget in case of unlink/rename objects
  • +
  • #1613098: posix-acl: skip acl_permits check when the owner setting GF_POSIX_ACL_xxxx
  • +
  • #1613807: Fix spurious failures in tests/basic/afr/granular-esh/replace-brick.t
  • +
  • #1614062: Provide/preserve tarball of retried tests
  • +
  • #1614088: kill_brick function needs to wait for brick to be killed
  • +
  • #1614124: glusterfsd process crashed in a multiplexed configuration during cleanup of a single brick-graph triggered by volume-stop.
  • +
  • #1614142: Fix the grammar error in the rpc log
  • +
  • #1614168: [uss]snapshot: posix acl authentication is not working as expected
  • +
  • #1614654: Potential fixes for tests/basic/afr/add-brick-self-heal.t failure
  • +
  • #1614662: ./tests/bugs/replicate/bug-1448804-check-quorum-type-values.t
  • +
  • #1614718: Fix spurious failures in tests/bugs/index/bug-1559004-EMLINK-handling.t
  • +
  • #1614730: Test case bug-1433571-undo-pending-only-on-up-bricks.t failure
  • +
  • #1614799: Geo-rep: Few workers fails to start with out any failure
  • +
  • #1615037: Multiplex tests use a cleanup pattern that results in empty tarballs on failure
  • +
  • #1615078: tests/bugs/replicate/bug-1408712.t fails.
  • +
  • #1615092: tests/bugs/shard/configure-lru-limit.t spurious failure
  • +
  • #1615096: ./tests/bugs/quick-read/bug-846240.t fails spuriously
  • +
  • #1615239: Fix ./tests/basic/afr/replace-brick-self-heal.t failure
  • +
  • #1615331: gfid-mismatch-resolution-with-fav-child-policy.t is failing
  • +
  • #1615474: Rebalance status shows wrong count of "Rebalanced-files" if the file has hardlinks
  • +
  • #1615582: test: ./tests/basic/stats-dump.t fails spuriously not finding queue_size in stats output for some brick
  • +
  • #1615703: [Disperse] Improve log messages for EC volume
  • +
  • #1615789: Come up with framework to test thin-arbiter
  • +
  • #1618004: [GSS] glusterd not starting after upgrade due to snapshots error in RHEV + RHGS
  • +
  • #1619027: geo-rep: Active/Passive status change logging is redundant
  • +
  • #1619423: cli: Command gluster volume statedump <volname> dumps core
  • +
  • #1619475: NetBSD memory detection issue
  • +
  • #1619720: posix_mknod does not update trusted.pgfid.xx xattr correctly
  • +
  • #1619843: Snapshot status fails with commit failure
  • +
  • #1620544: Brick process NOT ONLINE for heketidb and block-hosting volume
  • +
  • #1621981: dht: File rename removes the .glusterfs handle for linkto file
  • +
  • #1622076: [geo-rep]: geo-rep reverse sync in FO/FB can accidentally delete the content at original master incase of gfid conflict in 3.4.0 without explicit user rmdir
  • +
  • #1622422: glusterd cli is showing brick status N/A even brick is consumed by a brick process
  • +
  • #1622549: libgfchangelog: History API fails
  • +
  • #1622665: clang-scan report: glusterfs issues
  • +
  • #1622821: Prevent hangs while increasing replica-count/replace-brick for directory hierarchy
  • +
  • #1623408: rpc: log fuse request ID with gluster transaction ID
  • +
  • #1623759: [Disperse] Don't send final version update if non data fop succeeded
  • +
  • #1624244: DHT: Rework the virtual xattr to get the hash subvol
  • +
  • #1624440: Fail volume stop operation in case brick detach request fails
  • +
  • #1625089: CVE-2018-10911 glusterfs: Improper deserialization in dict.c:dict_unserialize() can allow attackers to read arbitrary memory
  • +
  • #1625095: CVE-2018-10930 glusterfs: Files can be renamed outside volume
  • +
  • #1625096: CVE-2018-10923 glusterfs: I/O to arbitrary devices on storage server
  • +
  • #1625097: CVE-2018-10907 glusterfs: Stack-based buffer overflow in server-rpc-fops.c allows remote attackers to execute arbitrary code
  • +
  • #1625102: CVE-2018-10913 glusterfs: Information Exposure in posix_get_file_contents function in posix-helpers.c
  • +
  • #1625106: CVE-2018-10904 glusterfs: Unsanitized file names in debug/io-stats translator can allow remote attackers to execute arbitrary code
  • +
  • #1625643: Use CALLOC in dht_layouts_init
  • +
  • #1626319: DH ciphers disabled errors are encountered on basic mount & unmount with ssl enabled setup
  • +
  • #1626346: dht: Use snprintf in dht_filter_loc_subvol_key
  • +
  • #1626394: dht_create: Create linkto files if required when using dht_filter_loc_subvol_key
  • +
  • #1626787: sas workload job getting stuck after sometime
  • +
  • #1627044: Converting to replica 2 volume is not throwing warning
  • +
  • #1627620: SAS job aborts complaining about file doesn't exist
  • +
  • #1628668: Update op-version from 4.2 to 5.0
  • +
  • #1629877: GlusterFS can be improved (clone for Gluster-5)
  • +
  • #1630673: geo-rep: geo-rep config set fails to set rsync-options
  • +
  • #1630804: libgfapi-python: test_listdir_with_stat and test_scandir failure on release 5 branch
  • +
  • #1633015: ctime: Access time is different with in same replica/EC volume
  • +
  • #1633242: 'df' shows half as much space on volume after upgrade to RHGS 3.4
  • +
  • #1633552: glusterd crash in regression build
  • +
  • #1635373: ASan (address sanitizer) fixes - Blanket bug
  • +
  • #1635972: Low Random write IOPS in VM workloads
  • +
  • #1635975: Writes taking very long time leading to system hogging
  • +
  • #1636162: [SNAPSHOT]: with brick multiplexing, snapshot restore will make glusterd send wrong volfile
  • +
  • #1636842: df shows Volume size as zero if Volume created and mounted using Glusterd2
  • +
  • #1638159: data-self-heal in arbiter volume results in stale locks.
  • +
  • #1638163: split-brain observed on parent dir
  • +
  • #1639688: core: backport uuid fixes
  • +
  • #1640392: io-stats: garbage characters in the filenames generated

diff --git a/release-notes/5.1/index.html b/release-notes/5.1/index.html
new file mode 100644

Release notes for Gluster 5.1

This is a bugfix release. The release notes for 5.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

NOTE: Next minor release tentative date: Week of 10th December, 2018

Major changes, features and limitations addressed in this release

This release contains fixes for several security vulnerabilities in Gluster as follows,

  • https://nvd.nist.gov/vuln/detail/CVE-2018-14651
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14652
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14653
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14654
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14659
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14660
  • https://nvd.nist.gov/vuln/detail/CVE-2018-14661

Major issues


None

Bugs addressed

Bugs addressed since release-5.0 are listed below.

  • #1641429: Gfid mismatch seen on shards when lookup and mknod are in progress at the same time
  • +
  • #1641440: [ovirt-gluster] Mount hung and not accessible
  • +
  • #1641872: Spurious failures in bug-1637802-arbiter-stale-data-heal-lock.t
  • +
  • #1643078: tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t failing
  • +
  • #1643402: [Geo-Replication] Geo-rep faulty sesion because of the directories are not synced to slave.
  • +
  • #1644158: geo-rep: geo-replication gets stuck after file rename and gfid conflict
  • +
  • #1644161: cliutils: geo-rep cliutils' usage of Popen is not python3 compatible
  • +
  • #1644314: build/packaging: el-X (x > 7) isms
  • +
  • #1644514: geo-rep: On gluster command failure on slave, worker crashes with python3
  • +
  • #1644515: geo-rep: gluster-mountbroker status crashes
  • +
  • #1644526: Excessive logging in posix_update_utime_in_mdata
  • +
  • #1644622: [Stress] : Mismatching iatt in glustershd logs during MTSH and continous IO from Ganesha mounts
  • +
  • #1644645: [AFR] : Start crawling indices and healing only if both data bricks are UP in replica 2 (thin-arbiter)
  • +
  • #1646204: CVE-2018-14654 glusterfs: "features/index" translator can create arbitrary, empty files [fedora-all]
  • +
  • #1646896: [Geo-Replication] Geo-rep faulty sesion because of the directories are not synced to slave.
  • +
  • #1647663: CVE-2018-14651 glusterfs: glusterfs server exploitable via symlinks to relative paths [fedora-all]
  • +
  • #1647664: CVE-2018-14653 glusterfs: Heap-based buffer overflow via "gf_getspec_req" RPC message [fedora-all]
  • +
  • #1647665: CVE-2018-14659 glusterfs: Unlimited file creation via "GF_XATTR_IOSTATS_DUMP_KEY" xattr allows for denial of service [fedora-all]
  • +
  • #1647666: CVE-2018-14661 glusterfs: features/locks translator passes an user-controlled string to snprintf without a proper format string resulting in a denial of service [fedora-all]
  • +
  • #1647801: can't enable shared-storage
  • +
  • #1647962: CVE-2018-14660 glusterfs: Repeat use of "GF_META_LOCK_KEY" xattr allows for memory exhaustion [fedora-all]
  • +
  • #1647968: Seeing defunt translator and discrepancy in volume info when issued from node which doesn't host bricks in that volume
  • +
  • #1648923: gfapi: fix bad dict setting of lease-id

diff --git a/release-notes/5.10/index.html b/release-notes/5.10/index.html
new file mode 100644

Release notes for Gluster 5.10

This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6, 5.8 and 5.9 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

Next minor release tentative date: Week of 10th December, 2019

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-5.9 are listed below.

  • #1749352: Failures in remove-brick due to [Input/output error] errors
  • #1750230: [geo-rep]: Non-root - Unable to set up mountbroker root directory and group
  • #1739336: Multiple disconnect events being propagated for the same child

diff --git a/release-notes/5.11/index.html b/release-notes/5.11/index.html
new file mode 100644

Release notes for Gluster 5.11

This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6, 5.8, 5.9, and 5.10 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

Next minor release tentative date: Week of 10th February, 2020

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-5.10 are listed below.

  • #1718734: Memory leak in glusterfsd process
  • #1760710: glustershd can not decide heald_sinks, and skip repair, so some entries lingering in volume heal info
  • #1767305: READDIRP incorrectly updates posix-acl inode ctx
  • #1779284: Backport GNFS memory leak fix to version 5

diff --git a/release-notes/5.12/index.html b/release-notes/5.12/index.html
new file mode 100644

Release notes for Gluster 5.12

This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6, 5.8, 5.9, 5.10, and 5.11 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

Next minor release tentative date: Week of 10th April, 2020

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-5.11 are listed below.

  • #1803810: Functionality to enable log rotation for user serviceable snapshot's logs.
  • #1804512: Mounts fails after reboot of 1/3 gluster nodes
  • #1804522: Rebalance is causing glusterfs crash on client node
  • #1805047: I/O error on writes to a disperse volume when replace-brick is executed
  • #1805049: Glusterfsd crashing in ec-inode-write.c, in GF_ASSERT
  • #1805050: [Disperse] : Client side heal is not removing dirty flag for some of the files.
  • #1805051: Disperse volume : data corruption with ftruncate data in 4+2 config
  • #1805052: Disperse volume : Ganesha crash with IO in 4+2 config when one glusterfsd restart every 600s
  • #1805053: An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
  • #1805054: Disperse volume : data corruption with ftruncate data in 4+2 config
  • #1805055: Open fd heal should filter O_APPEND/O_EXCL
  • #1805056: Disperse volume : data corruption with ftruncate data in 4+2 config
  • #1805057: [EC] shd crashed while heal failed due to out of memory error.

diff --git a/release-notes/5.13/index.html b/release-notes/5.13/index.html
new file mode 100644

Release notes for Gluster 5.13

This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6, 5.8, 5.9, 5.10, 5.11, and 5.12 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

NOTE: This is supposed to be the last minor release of 5.

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-5.12 are listed below.

  • #1803810: Functionality to enable log rotation for user serviceable snapshot's logs.
  • #1127: Mount crash during background shard cleanup
  • #1103: afr: prevent spurious entry heals leading to gfid split-brain
  • #1067: Metadata heal picks different brick each time as source if there are no pending xattrs
  • #1028: Segfault in FUSE process, potential use after free
  • #1390914: Glusterfs create a flock lock by anonymous fd, but can't release it forever.
  • #1806931: Changes to self-heal logic w.r.t. detecting metadata split-brains
  • #1807007: The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
  • #1807431: Setting cluster.heal-timeout requires volume restart
  • #1807748: bug-1402841.t-mt-dir-scan-race.t fails spuriously
  • #1808256: Glusterfs create a flock lock by anonymous fd, but can't release it forever.
  • #1809440: [brickmux]: glustershd crashed when rebooting 1/3 nodes at regular intervals

diff --git a/release-notes/5.2/index.html b/release-notes/5.2/index.html
new file mode 100644

Release notes for Gluster 5.2

This is a bugfix release. The release notes for 5.0 and 5.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

NOTE: Next minor release tentative date: Week of 10th January, 2019

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-5.1 are listed below.

  • #1651525: Issuing a "heal ... full" on a disperse volume causes permanent high CPU utilization.
  • #1654115: [Geo-rep]: Faulty geo-rep sessions due to link ownership on slave volume
  • #1654117: [geo-rep]: Failover / Failback shows fault status in a non-root setup
  • #1654236: Provide an option to silence glfsheal logs
  • #1654370: Bitrot: Scrub status say file is corrupted even it was just created AND 'path' in the output is broken
  • #1655545: gfid heal does not happen when there is no source brick

diff --git a/release-notes/5.3/index.html b/release-notes/5.3/index.html
new file mode 100644

Release notes for Gluster 5.3

This is a bugfix release. The release notes for 5.0, 5.1 and 5.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

NOTE: Next minor release tentative date: Week of 10th March, 2019

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-5.2 are listed below.

  • #1623107: FUSE client's memory leak
  • #1648642: fails to sync non-ascii (utf8) file and directory names, causes permanently faulty geo-replication state
  • #1651323: Tracker bug for all leases related issues
  • #1659563: gluster-blockd segfaults because of a null-dereference in shard.so
  • #1659676: Memory leak: dict_t leak in rda_opendir
  • #1660736: dht_revalidate may not heal attrs on the brick root
  • #1660932: Fix tests/bugs/shard/zero-flag.t
  • #1662200: NL cache: fix typos
  • #1663131: [Ganesha] Ganesha failed on one node while exporting volumes in loop
  • #1665803: [ovirt-gluster] Fuse mount crashed while deleting a 1 TB image file from ovirt

diff --git a/release-notes/5.5/index.html b/release-notes/5.5/index.html
new file mode 100644

Release notes for Gluster 5.5

This is a bugfix release. The release notes for 5.0, 5.1, 5.2 and 5.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

NOTE: Next minor release tentative date: Week of 10th May, 2019

NOTE: Release 5.4 was never announced as there was a fix which prevented rolling upgrades from working correctly. Hence these release notes contain a skip from 5.3 to 5.5 in terms of issues addressed, and also address the issue where rolling upgrades were broken.

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-5.3 are listed below.

  • #1684385: [ovirt-gluster] Rolling gluster upgrade from 3.12.5 to 5.3 led to shard on-disk xattrs disappearing
  • +
  • #1684569: Upgrade from 4.1 and 5 is broken
  • +
  • #1687249: Error handling in /usr/sbin/gluster-eventsapi produces IndexError: tuple index out of range
  • +
  • #1687687: [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
  • +
  • #1649054: glustereventsd does not start on Ubuntu 16.04 LTS
  • +
  • #1651246: Failed to dispatch handler
  • +
  • #1665145: Writes on Gluster 5 volumes fail with EIO when "cluster.consistent-metadata" is set
  • +
  • #1669382: [ovirt-gluster] Fuse mount crashed while creating the preallocated image
  • +
  • #1670307: api: bad GFAPI_4.1.6 block
  • +
  • #1671217: core: move "dict is NULL" logs to DEBUG log level
  • +
  • #1671556: glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
  • +
  • #1671611: Unable to delete directories that contain linkto files that point to itself.
  • +
  • #1672248: quorum count not updated in nfs-server vol file
  • +
  • #1672314: thin-arbiter: Check with thin-arbiter file before marking new entry change log
  • +
  • #1673268: Fix timeouts so the tests pass on AWS
  • +
  • #1678726: Integer Overflow possible in md-cache.c due to data type inconsistency
  • +
  • #1679968: Upgrade from glusterfs 3.12 to gluster 4/5 broken

diff --git a/release-notes/5.6/index.html b/release-notes/5.6/index.html
new file mode 100644

Release notes for Gluster 5.6

This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3 and 5.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

NOTE: Next minor release tentative date: Week of 10th June, 2019

NOTE: Release 5.4 was never announced as there was a fix which prevented rolling upgrades from working correctly. Hence these release notes contain a skip from 5.3 to 5.5 in terms of issues addressed, and also address the issue where rolling upgrades were broken.

Major changes, features and limitations addressed in this release

None

Major issues

Several users had issues around increased network usage after upgrading to a 5.x release; this issue was tracked against bug#1673058 and is now addressed as a part of this minor release.

Bugs addressed

Bugs addressed since release-5.5 are listed below.

  • #1673058: Network throughput usage increased x5
  • #1690952: lots of "Matching lock not found for unlock xxx" when using disperse (ec) xlator
  • #1694562: gfapi: do not block epoll thread for upcall notifications
  • #1694612: glusterd leaking memory when issued gluster vol status all tasks continuosly
  • #1695391: GF_LOG_OCCASSIONALLY API doesn't log at first instance
  • #1695403: rm -rf fails with "Directory not empty"
  • #1696147: Multiple shd processes are running on brick_mux environmet

diff --git a/release-notes/5.8/index.html b/release-notes/5.8/index.html
new file mode 100644

Release notes for Gluster 5.8

This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, and 5.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

NOTE: Release 5.7 is dead due to #1728988; the packages were never released. Please use 5.8.
Next minor release tentative date: Week of 10th August, 2019

Major changes, features and limitations addressed in this release

None

Major issues

An issue that was blocking the build was addressed (#1728988).

Bugs addressed

Bugs addressed since release-5.6 are listed below.

  • #1717282: ec ignores lock contention notifications for partially acquired locks
  • #1629877: GlusterFS can be improved (clone for Gluster-5)
  • #1695399: With parallel-readdir enabled, deleting a directory containing stale linkto files fails with "Directory not empty"
  • #1699500: fix truncate lock to cover the write in tuncate clean
  • #1699736: Fops hang when inodelk fails on the first fop
  • #1707198: VM stuck in a shutdown because of a pending fuse request
  • #1720634: Upcall: Avoid sending upcalls for invalid Inode
  • #1720636: Ganesha-gfapi logs are flooded with error messages related to "gf_uuid_is_null(gfid)) [Invalid argument]" when lookups are running from multiple clients
  • #1721106: Failed to create volume which transport_type is "tcp,rdma"
  • #1728988: release-5.7 glupy is not getting built during packaging.

diff --git a/release-notes/5.9/index.html b/release-notes/5.9/index.html
new file mode 100644

Release notes for Gluster 5.9

This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6 and 5.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

Next minor release tentative date: Week of 10th October, 2019

Major changes, features and limitations addressed in this release

None

Major issues

None

Bugs addressed

Bugs addressed since release-5.8 are listed below.

  • #1733881: [geo-rep]: gluster command not found while setting up a non-root session
  • #1736342: potential deadlock while processing callbacks in gfapi
  • #1737716: Unable to create geo-rep session on a non-root setup.

diff --git a/release-notes/6.0/index.html b/release-notes/6.0/index.html
new file mode 100644

Release notes for Gluster 6.0

This is a major release that includes a range of code improvements and stability fixes along with a few features as noted below.

A selection of the key features and changes is documented on this page. A full list of bugs that have been addressed is included further below.

Announcements

  1. Releases that receive maintenance updates post release 6 are, 4.1 and 5
     (reference)

  2. Release 6 will receive maintenance updates around the 10th of every month
     for the first 3 months post release (i.e. Apr'19, May'19, Jun'19). Post the
     initial 3 months, it will receive maintenance updates every 2 months till EOL.
     (reference)

  3. A series of features/xlators have been deprecated in release 6 as follows;
     for upgrade procedures from volumes that use these features to release 6, refer
     to the release 6 upgrade guide.

This deprecation was announced at the gluster-users list here.

+

Features deprecated:

  • Block device (bd) xlator
  • Decompounder feature
  • Crypt xlator
  • Symlink-cache xlator
  • Stripe feature
  • Tiering support (tier xlator and changetimerecorder)

Major changes and features

+

Highlights

  • Several stability fixes addressing,
    • coverity, clang-scan, address sanitizer and valgrind reported issues
    • removal of unused and hence, deprecated code and features
  • Client side inode garbage collection
    • This release addresses one of the major concerns regarding FUSE mount
      process memory footprint, by introducing client side inode garbage collection
    • See standalone section for more details
  • Performance Improvements
    • --auto-invalidation on FUSE mounts to leverage kernel page cache more
      effectively

Features are categorized into the following sections,

+ +

Management

+

NOTE: There have been several stability improvements around the brick +multiplexing feature

+

GlusterD2

+

GlusterD2 (or GD2, in short) was planned as the next generation management service for the Gluster project.

+

Currently, GD2's main focus is not on replacing glusterd, but on serving as a thin management layer when using gluster with container orchestration systems.

+

There is no specific update around GD2 provided as a part of this release.

+

Standalone

+

1. client-side inode garbage collection via LRU list

+

A FUSE mount's inode cache can now be limited to a maximum number, thus reducing +the memory footprint of FUSE mount processes.

+

See the lru-limit option in man 8 mount.glusterfs for details.

+

NOTE: Setting this to a low value (say, less than 4000) will evict inodes from FUSE and Gluster caches at a much faster rate, and can cause performance degradation. The setting has to be determined based on the available client memory and the required performance.
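As a minimal illustration (the server name, volume name, mount point and the limit value below are placeholders, not recommendations), the cache cap can be applied at mount time:

# mount -t glusterfs -o lru-limit=65536 server1:/demo-vol /mnt/demo-vol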

+

2. Glusterfind tool enhanced with a filter option

+

The glusterfind tool has a new option "--type", to be used with the "--full" option. The option restricts the output to only files or only directories, and defaults to both if not specified.

+

Example usage with the pre and query commands is given below:

+
    +
  1. +

    Pre command (reference):

    +
  2. +
  3. +

    Lists both files and directories in OUTFILE: + glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE

    +
  4. +
  5. +

    Lists only files in OUTFILE: + glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE --type f

    +
  6. +
  7. +

    Lists only directories in OUTFILE: + glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE --type d

    +
  8. +
  9. +

    Query command:

    +
  10. +
  11. +

    Lists both files and directories in OUTFILE: + glusterfind query VOLUME_NAME --full OUTFILE

    +
  12. +
  13. +

    Lists only files in OUTFILE: + glusterfind query VOLUME_NAME --full --type f OUTFILE

    +
  14. +
  15. +

    Lists only directories in OUTFILE: + glusterfind query VOLUME_NAME --full --type d OUTFILE

    +
  16. +
+

3. FUSE mounts are enhanced to handle interrupts to blocked lock requests

+

FUSE mounts are enhanced to handle interrupts to blocked locks.

+

For example, scripts using the flock (man 1 flock) utility without the -n (nonblock) option against files on a FUSE based gluster mount can now be interrupted when the lock is not granted in time, or by using the -w option with the same utility.
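As a small sketch (the lock file path is a placeholder), a script can now bound its wait on a gluster-backed lock instead of blocking indefinitely; if the lock is not granted within the timeout, flock exits with a failure:

# flock -w 10 /mnt/demo-vol/app.lock -c "echo lock acquired"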

+

4. Optimized/pass-through distribute functionality for 1-way distributed volumes

+

NOTE: There are no user controllable changes with this feature

+

The distribute xlator now skips unnecessary checks and operations when the +distribute count is one for a volume, resulting in improved performance.

+

5. Options introduced to disable invalidations of kernel page cache

+

For workloads where multiple FUSE client mounts do not concurrently operate on any files in the volume, it is now possible to maintain a longer-duration kernel page cache by using the following options in conjunction:

+
    +
  • Setting --auto-invalidation option to "no" on the glusterfs FUSE mount + process
  • +
  • Disabling the volume option performance.global-cache-invalidation
  • +
+

This enables better performance as the data is served from the kernel page cache +where possible.
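A rough sketch of combining the two settings (the volume name, server and mount point are placeholders; verify the option spellings against your installed version before relying on them):

# gluster volume set demo-vol performance.global-cache-invalidation off
# glusterfs --auto-invalidation=no --volfile-server=server1 --volfile-id=demo-vol /mnt/demo-vol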

+

6. Changes to gluster based SMB share management

+

Previously, all GlusterFS volumes were exported by default via smb.conf in a Samba-CTDB setup. This included creating a share section for the CTDB lock volume too, which is not recommended. Along with a few syntactical errors, these scripts failed to execute in a non-Samba setup in the absence of the necessary configuration and binary files.

+

Hereafter, newly created GlusterFS volumes are not exported as an SMB share via Samba unless either the 'user.cifs' or the 'user.smb' volume set option is enabled on the volume. The existing GlusterFS volume share sections in smb.conf will remain unchanged.
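For example, to have a newly created volume exported as an SMB share again, either of the following can be set on it (one is sufficient; the volume name is a placeholder and this is only a sketch of the intent):

# gluster volume set demo-vol user.smb enable
# gluster volume set demo-vol user.cifs enable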

+

7. ctime feature is enabled by default

+

The ctime feature which maintains (c/m) time consistency across replica and +disperse subvolumes is enabled by default.

+

Also, with this release, a single option is provided to enable/disable ctime +feature,

+
# gluster vol set <volname> ctime <on/off>
+
+

NOTE: The time information used is from clients, hence it's required that +clients are synced with respect to their times, using NTP or other such means.

+

Limitations:

+
    +
  • Mounting a gluster volume with time attribute options (noatime, relatime, ...) is not supported with this feature
  • +
  • This feature does not guarantee consistent time for directories if the hashed + sub-volume for the directory is down
  • +
  • Directory listing is not supported with this feature, and may report + inconsistent time information
  • +
  • Older files created before the upgrade will see their ctime updated upon access after the upgrade (BUG:1593542)
  • +
+

Developer

+

1. Gluster code can be compiled and executed using TSAN

+

While configuring the sources for a build, use the extra option --enable-tsan to enable thread sanitizer based builds.
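A possible build sequence from a source checkout is sketched below (assuming the usual GlusterFS build prerequisites are already installed):

# ./autogen.sh
# ./configure --enable-tsan
# make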

+

2. gfapi: A class of APIs have been enhanced to return pre/post gluster_stat information

+

A set of APIs has been enhanced to return pre/post gluster_stat information. Applications using gfapi would need to adapt to the newer interfaces to compile against the release-6 APIs. Pre-compiled applications, or applications using the older API SDK, will continue to work as before.

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-5 are listed below.

+
    +
  • #1138841: allow the use of the CIDR format with auth.allow
  • +
  • #1236272: socket: Use newer system calls that provide better interface/performance on Linux/*BSD when available
  • +
  • #1243991: "gluster volume set group " is not in the help text
  • +
  • #1285126: RFE: GlusterFS NFS does not implement an all_squash volume setting
  • +
  • #1343926: port-map: let brick choose its own port
  • +
  • #1364707: Remove deprecated stripe xlator
  • +
  • #1427397: script to strace processes consuming high CPU
  • +
  • #1467614: Gluster read/write performance improvements on NVMe backend
  • +
  • #1486532: need a script to resolve backtraces
  • +
  • #1511339: In Replica volume 2*2 when quorum is set, after glusterd restart nfs server is coming up instead of self-heal daemon
  • +
  • #1535495: Add option -h and --help to gluster cli
  • +
  • #1535528: Gluster cli show no help message in prompt
  • +
  • #1560561: systemd service file enhancements
  • +
  • #1560969: Garbage collect inactive inodes in fuse-bridge
  • +
  • #1564149: Agree upon a coding standard, and automate check for this in smoke
  • +
  • #1564890: mount.glusterfs: can't shift that many
  • +
  • #1575836: logic in S30samba-start.sh hook script needs tweaking
  • +
  • #1579788: Thin-arbiter: Have the state of volume in memory
  • +
  • #1582516: libgfapi: glfs init fails on afr volume with ctime feature enabled
  • +
  • #1590385: Refactor dht lookup code
  • +
  • #1593538: ctime: Access time is different with in same replica/EC volume
  • +
  • #1596787: glusterfs rpc-clnt.c: error returned while attempting to connect to host: (null), port 0
  • +
  • #1598345: gluster get-state command is crashing glusterd process when geo-replication is configured
  • +
  • #1600145: [geo-rep]: Worker still ACTIVE after killing bricks
  • +
  • #1605056: [RHHi] Mount hung and not accessible
  • +
  • #1605077: If a node disconnects during volume delete, it assumes deleted volume as a freshly created volume when it is back online
  • +
  • #1608512: cluster.server-quorum-type help text is missing possible settings
  • +
  • #1624006: /var/run/gluster/metrics/ wasn't created automatically
  • +
  • #1624332: [Thin-arbiter]: Add tests for thin arbiter feature
  • +
  • #1624724: ctime: Enable ctime feature by default and also improve usability by providing single option to enable
  • +
  • #1624796: mkdir -p fails with "No data available" when root-squash is enabled
  • +
  • #1625850: tests: fixes to bug-1015990-rep.t
  • +
  • #1625961: Writes taking very long time leading to system hogging
  • +
  • #1626313: fix glfs_fini related problems
  • +
  • #1626610: [USS]: Change gf_log to gf_msg
  • +
  • #1626994: split-brain observed on parent dir
  • +
  • #1627610: glusterd crash in regression build
  • +
  • #1627620: SAS job aborts complaining about file doesn't exist
  • +
  • #1628194: tests/dht: Additional tests for dht operations
  • +
  • #1628605: One client hangs when another client loses communication with bricks during intensive write I/O
  • +
  • #1628664: Update op-version from 4.2 to 5.0
  • +
  • #1629561: geo-rep: geo-rep config set fails to set rsync-options
  • +
  • #1630368: Low Random write IOPS in VM workloads
  • +
  • #1630798: Add performance options to virt profile
  • +
  • #1630804: libgfapi-python: test_listdir_with_stat and test_scandir failure on release 5 branch
  • +
  • #1630922: glusterd crashed and core generated at gd_mgmt_v3_unlock_timer_cbk after huge number of volumes were created
  • +
  • #1631128: rpc marks brick disconnected from glusterd & volume stop transaction gets timed out
  • +
  • #1631357: glusterfsd keeping fd open in index xlator after stop the volume
  • +
  • #1631886: Update database profile settings for gluster
  • +
  • #1632161: [Disperse] : Set others.eager-lock on for ec-1468261.t test to pass
  • +
  • #1632236: Provide indication at the console or in the logs about the progress being made with changelog processing.
  • +
  • #1632503: FUSE client segfaults when performance.md-cache-statfs is enabled for a volume
  • +
  • #1632717: EC crashes when running on non 64-bit architectures
  • +
  • #1632889: 'df' shows half as much space on volume after upgrade to RHGS 3.4
  • +
  • #1633926: Script to collect system-stats
  • +
  • #1634102: MAINTAINERS: Add sunny kumar as a peer for snapshot component
  • +
  • #1634220: md-cache: some problems of cache virtual glusterfs ACLs for ganesha
  • +
  • #1635050: [SNAPSHOT]: with brick multiplexing, snapshot restore will make glusterd send wrong volfile
  • +
  • #1635145: I/O errors observed on the application side after the creation of a 'linkto' file
  • +
  • #1635480: Correction for glusterd memory leak because use "gluster volume status volume_name --detail" continuesly (cli)
  • +
  • #1635593: glusterd crashed in cleanup_and_exit when glusterd comes up with upgrade mode.
  • +
  • #1635688: Keep only the valid (maintained/supported) components in the build
  • +
  • #1635820: Seeing defunt translator and discrepancy in volume info when issued from node which doesn't host bricks in that volume
  • +
  • #1635863: Gluster peer probe doesn't work for IPv6
  • +
  • #1636570: Cores due to SIGILL during multiplex regression tests
  • +
  • #1636631: Issuing a "heal ... full" on a disperse volume causes permanent high CPU utilization.
  • +
  • #1637196: Disperse volume 'df' usage is extremely incorrect after replace-brick.
  • +
  • #1637249: gfid heal does not happen when there is no source brick
  • +
  • #1637802: data-self-heal in arbiter volume results in stale locks.
  • +
  • #1637934: glusterfsd is keeping fd open in index xlator
  • +
  • #1638453: Gfid mismatch seen on shards when lookup and mknod are in progress at the same time
  • +
  • #1639599: Improve support-ability of glusterfs
  • +
  • #1640026: improper checking to avoid identical mounts
  • +
  • #1640066: [Stress] : Mismatching iatt in glustershd logs during MTSH and continous IO from Ganesha mounts
  • +
  • #1640165: io-stats: garbage characters in the filenames generated
  • +
  • #1640489: Invalid memory read after freed in dht_rmdir_readdirp_cbk
  • +
  • #1640495: [GSS] Fix log level issue with brick mux
  • +
  • #1640581: [AFR] : Start crawling indices and healing only if both data bricks are UP in replica 2 (thin-arbiter)
  • +
  • #1641344: Spurious failures in bug-1637802-arbiter-stale-data-heal-lock.t
  • +
  • #1642448: EC volume getting created without any redundant brick
  • +
  • #1642597: tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t failing
  • +
  • #1642800: socket: log voluntary socket close/shutdown and EOF on socket at INFO log-level
  • +
  • #1642807: remove 'tier' translator from build and code
  • +
  • #1642810: remove glupy from code and build
  • +
  • #1642850: glusterd: raise default transport.listen-backlog to 1024
  • +
  • #1642865: geo-rep: geo-replication gets stuck after file rename and gfid conflict
  • +
  • #1643349: [OpenSSL] : auth.ssl-allow has no option description.
  • +
  • #1643402: [Geo-Replication] Geo-rep faulty sesion because of the directories are not synced to slave.
  • +
  • #1643519: Provide an option to silence glfsheal logs
  • +
  • #1643929: geo-rep: gluster-mountbroker status crashes
  • +
  • #1643932: geo-rep: On gluster command failure on slave, worker crashes with python3
  • +
  • #1643935: cliutils: geo-rep cliutils' usage of Popen is not python3 compatible
  • +
  • #1644129: Excessive logging in posix_update_utime_in_mdata
  • +
  • #1644164: Use GF_ATOMIC ops to update inode->nlookup
  • +
  • #1644629: [rpcsvc] Single request Queue for all event threads is a performance bottleneck
  • +
  • #1644755: CVE-2018-14651 glusterfs: glusterfs server exploitable via symlinks to relative paths [fedora-all]
  • +
  • #1644756: CVE-2018-14653 glusterfs: Heap-based buffer overflow via "gf_getspec_req" RPC message [fedora-all]
  • +
  • #1644757: CVE-2018-14659 glusterfs: Unlimited file creation via "GF_XATTR_IOSTATS_DUMP_KEY" xattr allows for denial of service [fedora-all]
  • +
  • #1644758: CVE-2018-14660 glusterfs: Repeat use of "GF_META_LOCK_KEY" xattr allows for memory exhaustion [fedora-all]
  • +
  • #1644760: CVE-2018-14654 glusterfs: "features/index" translator can create arbitrary, empty files [fedora-all]
  • +
  • #1644763: CVE-2018-14661 glusterfs: features/locks translator passes an user-controlled string to snprintf without a proper format string resulting in a denial of service [fedora-all]
  • +
  • #1645986: tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t failing in distributed regression
  • +
  • #1646104: [Geo-rep]: Faulty geo-rep sessions due to link ownership on slave volume
  • +
  • #1646728: [snapview-server]:forget glfs handles during inode forget
  • +
  • #1646869: gNFS crashed when processing "gluster v status [vol] nfs clients"
  • +
  • #1646892: Portmap entries showing stale brick entries when bricks are down
  • +
  • #1647029: can't enable shared-storage
  • +
  • #1647074: when peer detach is issued, throw a warning to remount volumes using other cluster IPs before proceeding
  • +
  • #1647651: gfapi: fix bad dict setting of lease-id
  • +
  • #1648237: Bumping up of op-version times out on a scaled system with ~1200 volumes
  • +
  • #1648298: dht_revalidate may not heal attrs on the brick root
  • +
  • #1648687: Incorrect usage of local->fd in afr_open_ftruncate_cbk
  • +
  • #1648768: Tracker bug for all leases related issues
  • +
  • #1649709: profile info doesn't work when decompounder xlator is not in graph
  • +
  • #1650115: glusterd requests are timing out in a brick multiplex setup
  • +
  • #1650389: rpc: log flooding with ENODATA errors
  • +
  • #1650403: Memory leaks observed in brick-multiplex scenario on volume start/stop loop
  • +
  • #1650893: fails to sync non-ascii (utf8) file and directory names, causes permanently faulty geo-replication state
  • +
  • #1651059: [OpenSSL] : Retrieving the value of "client.ssl" option,before SSL is set up, fails .
  • +
  • #1651165: Race in per-thread mem-pool when a thread is terminated
  • +
  • #1651431: Resolve memory leak at the time of graph init
  • +
  • #1651439: gluster-NFS crash while expanding volume
  • +
  • #1651463: glusterd can't regenerate volfiles in container storage upgrade workflow
  • +
  • #1651498: [geo-rep]: Failover / Failback shows fault status in a non-root setup
  • +
  • #1651584: [geo-rep]: validate the config checkpoint date and fail if it is not is exact format hh:mm:ss
  • +
  • #1652118: default cluster.max-bricks-per-process to 250
  • +
  • #1652430: glusterd fails to start, when glusterd is restarted in a loop for every 45 seconds while volume creation is in-progress
  • +
  • #1652852: "gluster volume get" doesn't show real default value for server.tcp-user-timeout
  • +
  • #1652887: Geo-rep help looks to have a typo.
  • +
  • #1652911: Add no-verify and ssh-port n options for create command in man page
  • +
  • #1653277: bump up default value of server.event-threads
  • +
  • #1653359: Self-heal:Improve heal performance
  • +
  • #1653565: tests/geo-rep: Add arbiter volume test case
  • +
  • #1654138: Optimize for virt store fails with distribute volume type
  • +
  • #1654181: glusterd segmentation fault: glusterd_op_ac_brick_op_failed (event=0x7f44e0e63f40, ctx=0x0) at glusterd-op-sm.c:5606
  • +
  • #1654187: [geo-rep]: RFE - Make slave volume read-only while setting up geo-rep (by default)
  • +
  • #1654270: glusterd crashed with seg fault possibly during node reboot while volume creates and deletes were happening
  • +
  • #1654521: io-stats outputs json numbers as strings
  • +
  • #1654805: Bitrot: Scrub status say file is corrupted even it was just created AND 'path' in the output is broken
  • +
  • #1654917: cleanup resources in server_init in case of failure
  • +
  • #1655050: automatic split resolution with size as policy should not work on a directory which is in metadata splitbrain
  • +
  • #1655052: Automatic Splitbrain with size as policy must not resolve splitbrains when both the copies are of same size
  • +
  • #1655827: [Glusterd]: Glusterd crash while expanding volumes using heketi
  • +
  • #1655854: Converting distribute to replica-3/arbiter volume fails
  • +
  • #1656100: configure.ac does not enforce automake --foreign
  • +
  • #1656264: Fix tests/bugs/shard/zero-flag.t
  • +
  • #1656348: Commit c9bde3021202f1d5c5a2d19ac05a510fc1f788ac causes ls slowdown
  • +
  • #1656517: [GSS] Gluster client logs filling with 0-glusterfs-socket: invalid port messages
  • +
  • #1656682: brick memory consumed by volume is not getting released even after delete
  • +
  • #1656771: [Samba-Enhancement] Need for a single group command for setting up volume options for samba
  • +
  • #1656951: cluster.max-bricks-per-process 250 not working as expected
  • +
  • #1657607: Convert nr_files to gf_atomic in posix_private structure
  • +
  • #1657744: quorum count not updated in nfs-server vol file
  • +
  • #1657783: Rename of a file leading to stale reads
  • +
  • #1658045: Resolve memory leak in mgmt_pmap_signout_cbk
  • +
  • #1658116: python2 to python3 compatibilty issues
  • +
  • #1659327: 43% regression in small-file sequential read performance
  • +
  • #1659432: Memory leak: dict_t leak in rda_opendir
  • +
  • #1659708: Optimize by not stopping (restart) selfheal deamon (shd) when a volume is stopped unless it is the last volume
  • +
  • #1659857: change max-port value in glusterd vol file to 60999
  • +
  • #1659868: glusterd : features.selinux was missing in glusterd-volume-set file
  • +
  • #1659869: improvements to io-cache
  • +
  • #1659971: Setting slave volume read-only option by default results in failure
  • +
  • #1660577: [Ganesha] Ganesha failed on one node while exporting volumes in loop
  • +
  • #1660701: Use adaptive mutex in rpcsvc_program_register to improve performance
  • +
  • #1661214: Brick is getting OOM for tests/bugs/core/bug-1432542-mpx-restart-crash.t
  • +
  • #1662089: NL cache: fix typos
  • +
  • #1662264: thin-arbiter: Check with thin-arbiter file before marking new entry change log
  • +
  • #1662368: [ovirt-gluster] Fuse mount crashed while deleting a 1 TB image file from ovirt
  • +
  • #1662679: Log connection_id in statedump for posix-locks as well for better debugging experience
  • +
  • #1662906: Longevity: glusterfsd(brick process) crashed when we do volume creates and deletes
  • +
  • #1663077: memory leak in mgmt handshake
  • +
  • #1663102: Change default value for client side heal to off for replicate volumes
  • +
  • #1663223: profile info command is not displaying information of bricks which are hosted on peers
  • +
  • #1663243: rebalance status does not display localhost statistics when op-version is not bumped up
  • +
  • #1664122: do not send bit-rot virtual xattrs in lookup response
  • +
  • #1664124: Improve information dumped from io-threads in statedump
  • +
  • #1664551: Wrong description of localtime-logging in manpages
  • +
  • #1664647: dht: Add NULL check for stbuf in dht_rmdir_lookup_cbk
  • +
  • #1664934: glusterfs-fuse client not benefiting from page cache on read after write
  • +
  • #1665038: glusterd crashed while running "gluster get-state glusterd odir /get-state"
  • +
  • #1665332: Wrong offset is used in offset for zerofill fop
  • +
  • #1665358: allow regression to not run tests with nfs, if nfs is disabled.
  • +
  • #1665363: Fix incorrect definition in index-mem-types.h
  • +
  • #1665656: testcaes glusterd/add-brick-and-validate-replicated-volume-options.t is crash while brick_mux is enable
  • +
  • #1665826: [geo-rep]: Directory renames not synced to slave in Hybrid Crawl
  • +
  • #1666143: Several fixes on socket pollin and pollout return value
  • +
  • #1666833: move few recurring logs to DEBUG level.
  • +
  • #1667779: glusterd leaks about 1GB memory per day on single machine of storage pool
  • +
  • #1667804: Unable to delete directories that contain linkto files that point to itself.
  • +
  • #1667905: dict_leak in __glusterd_handle_cli_uuid_get function
  • +
  • #1668190: Block hosting volume deletion via heketi-cli failed with error "target is busy" but deleted from gluster backend
  • +
  • #1668268: Unable to mount gluster volume
  • +
  • #1669077: [ovirt-gluster] Fuse mount crashed while creating the preallocated image
  • +
  • #1669937: Rebalance : While rebalance is in progress , SGID and sticky bit which is set on the files while file migration is in progress is seen on the mount point
  • +
  • #1670031: performance regression seen with smallfile workload tests
  • +
  • #1670253: Writes on Gluster 5 volumes fail with EIO when "cluster.consistent-metadata" is set
  • +
  • #1670259: New GFID file recreated in a replica set after a GFID mismatch resolution
  • +
  • #1671213: core: move "dict is NULL" logs to DEBUG log level
  • +
  • #1671637: geo-rep: Issue with configparser import
  • +
  • #1672205: 'gluster get-state' command fails if volume brick doesn't exist.
  • +
  • #1672818: GlusterFS 6.0 tracker
  • +
  • #1673267: Fix timeouts so the tests pass on AWS
  • +
  • #1673972: insufficient logging in glusterd_resolve_all_bricks
  • +
  • #1674364: glusterfs-fuse client not benefiting from page cache on read after write
  • +
  • #1676429: distribute: Perf regression in mkdir path
  • +
  • #1677260: rm -rf fails with "Directory not empty"
  • +
  • #1678570: glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
  • +
  • #1679004: With parallel-readdir enabled, deleting a directory containing stale linkto files fails with "Directory not empty"
  • +
  • #1679275: dht: fix double extra unref of inode at heal path
  • +
  • #1679965: Upgrade from glusterfs 3.12 to gluster 4/5 broken
  • +
  • #1679998: GlusterFS can be improved
  • +
  • #1680020: Integer Overflow possible in md-cache.c due to data type inconsistency
  • +
  • #1680585: remove glupy from code and build
  • +
  • #1680586: Building RPM packages with _for_fedora_koji_builds enabled fails on el6
  • +
  • #1683008: glustereventsd does not start on Ubuntu 16.04 LTS
  • +
  • #1683506: remove experimental xlators informations from glusterd-volume-set.c
  • +
  • #1683716: glusterfind: revert shebangs to #!/usr/bin/python3
  • +
  • #1683880: Multiple shd processes are running on brick_mux environmet
  • +
  • #1683900: Failed to dispatch handler
  • +
  • #1684029: upgrade from 3.12, 4.1 and 5 to 6 broken
  • +
  • #1684777: gNFS crashed when processing "gluster v profile [vol] info nfs"
  • +
  • #1685771: glusterd memory usage grows at 98 MB/h while being monitored by RHGSWA
  • +
  • #1686364: [ovirt-gluster] Rolling gluster upgrade from 3.12.5 to 5.3 led to shard on-disk xattrs disappearing
  • +
  • #1686399: listing a file while writing to it causes deadlock
  • +
  • #1686875: packaging: rdma on s390x, unnecessary ldconfig scriptlets
  • +
  • #1687248: Error handling in /usr/sbin/gluster-eventsapi produces IndexError: tuple index out of range
  • +
  • #1687672: [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
  • +
  • #1688218: Brick process has coredumped, when starting glusterd
  • +
\ No newline at end of file
diff --git a/release-notes/6.1/index.html b/release-notes/6.1/index.html
new file mode 100644
index 00000000..ac15fd26
--- /dev/null
+++ b/release-notes/6.1/index.html
@@ -0,0 +1,4545 @@

Release notes for Gluster 6.1

+

This is a bugfix release. The release notes for 6.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Next minor release tentative date: Week of 10th May, 2019

+

Major changes, features and limitations addressed in this release

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-6.0 are listed below.

+
    +
  • #1679904: client log flooding with intentional socket shutdown message when a brick is down
  • +
  • #1690950: lots of "Matching lock not found for unlock xxx" when using disperse (ec) xlator
  • +
  • #1691187: fix Coverity CID 1399758
  • +
  • #1692101: Network throughput usage increased x5
  • +
  • #1692957: rpclib: slow floating point math and libm
  • +
  • #1693155: Excessive AFR messages from gluster showing in RHGSWA.
  • +
  • #1693223: [Disperse] : Client side heal is not removing dirty flag for some of the files.
  • +
  • #1693992: Thin-arbiter minor fixes
  • +
  • #1694002: Geo-re: Geo replication failing in "cannot allocate memory"
  • +
  • #1694561: gfapi: do not block epoll thread for upcall notifications
  • +
  • #1694610: glusterd leaking memory when issued gluster vol status all tasks continuosly
  • +
  • #1695436: geo-rep session creation fails with IPV6
  • +
  • #1695445: ssh-port config set is failing
  • +
  • #1697764: [cluster/ec] : Fix handling of heal info cases without locks
  • +
  • #1698471: ctime feature breaks old client to connect to new server
  • +
  • #1699198: Glusterfs create a flock lock by anonymous fd, but can't release it forever.
  • +
  • #1699319: Thin-Arbiter SHD minor fixes
  • +
  • #1699499: fix truncate lock to cover the write in tuncate clean
  • +
  • #1699703: ctime: Creation of tar file on gluster mount throws warning "file changed as we read it"
  • +
  • #1699713: glusterfs build is failing on rhel-6
  • +
  • #1699714: Brick is not able to detach successfully in brick_mux environment
  • +
  • #1699715: Log level changes do not take effect until the process is restarted
  • +
  • #1699731: Fops hang when inodelk fails on the first fop
  • +
\ No newline at end of file
diff --git a/release-notes/6.10/index.html b/release-notes/6.10/index.html
new file mode 100644
index 00000000..f5c8b11a
--- /dev/null
+++ b/release-notes/6.10/index.html
@@ -0,0 +1,4504 @@

Release notes for Gluster 6.10

+

This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, +6.7, 6.8 and 6.9 +contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: This is the last minor release of 6. Users are highly encouraged to upgrade to newer releases of GlusterFS.

+

Bugs addressed

+

Bugs addressed since release-6.9 are listed below.

+
    +
  • #1740494: Fencing: Added the tcmu-runner ALUA feature support but after one of node is rebooted the glfs_file_lock() get stucked
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #1016 [bug:1795609] glusterfsd memory leak observed after enable tls
  • +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
  • #1127 Mount crash during background shard cleanup
  • +
  • #1179 gnfs split brain when 1 server in 3x1 down (high load)
  • +
  • #1220 cluster/ec: return correct error code and log the message in ...
  • +
  • #1223 Failure of tests/basic/gfapi/gfapi-copy-file-range.t
  • +
  • #1254 Prioritize ENOSPC over other lesser priority errors
  • +
  • #1303 Failures in rebalance due to [Input/output error]
  • +
  • #1307 Spurious failure of tests/bug-844688.t: test bug-844688.t on ...
  • +
  • #1349 Issue for backporting https://review.gluster.org//c/glusterf...
  • +
  • #1362 [bug: 1687326]: Revoke access from nodes using Certificate Re...
  • +
\ No newline at end of file
diff --git a/release-notes/6.2/index.html b/release-notes/6.2/index.html
new file mode 100644
index 00000000..3c83a3ca
--- /dev/null
+++ b/release-notes/6.2/index.html
@@ -0,0 +1,4538 @@

Release notes for Gluster 6.2

+

This is a bugfix release. The release notes for 6.0 and 6.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Next minor release tentative date: Week of 10th June, 2019

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-6.1 are listed below.

+
    +
  • #1699917: I/O error on writes to a disperse volume when replace-brick is executed
  • +
  • #1701818: Syntactical errors in hook scripts for managing SELinux context on bricks #2 (S10selinux-label-brick.sh + S10selinux-del-fcontext.sh)
  • +
  • #1702271: Memory accounting information is not always accurate
  • +
  • #1702734: ctime: Logs are flooded with "posix set mdata failed, No ctime" error during open
  • +
  • #1703759: statedump is not capturing info related to glusterd
  • +
  • #1707393: Refactor dht lookup code
  • +
  • #1709130: thin-arbiter lock release fixes
  • +
  • #1709143: [Thin-arbiter] : send correct error code in case of failure
  • +
  • #1709660: Glusterfsd crashing in ec-inode-write.c, in GF_ASSERT
  • +
  • #1709685: Geo-rep: Value of pending entry operations in detail status output is going up after each synchronization.
  • +
  • #1709734: Geo-rep: Data inconsistency while syncing heavy renames with constant destination name
  • +
  • #1709737: geo-rep: Always uses rsync even with use_tarssh set to true
  • +
  • #1709738: geo-rep: Sync hangs with tarssh as sync-engine
  • +
  • #1712220: tests/geo-rep: arequal checksum comparison always succeeds
  • +
  • #1712223: geo-rep: With heavy rename workload geo-rep log if flooded
  • +
\ No newline at end of file
diff --git a/release-notes/6.3/index.html b/release-notes/6.3/index.html
new file mode 100644
index 00000000..be6961f9
--- /dev/null
+++ b/release-notes/6.3/index.html
@@ -0,0 +1,4525 @@

Release notes for Gluster 6.3

+

This is a bugfix release. The release notes for 6.0, 6.1 and 6.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Next minor release tentative date: Week of 10th July, 2019

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-6.2 are listed below.

+
    +
  • #1714172: ec ignores lock contention notifications for partially acquired locks
  • +
  • #1715012: Failure when glusterd is configured to bind specific IPv6 address. If bind-address is IPv6, *addr_len will be non-zero and it goes to ret = -1 branch, which will cause listen failure eventually
  • +
\ No newline at end of file
diff --git a/release-notes/6.4/index.html b/release-notes/6.4/index.html
new file mode 100644
index 00000000..5de8d133
--- /dev/null
+++ b/release-notes/6.4/index.html
@@ -0,0 +1,4543 @@

Release notes for Gluster 6.4

+

This is a bugfix release. The release notes for 6.0, 6.1, 6.2 and 6.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Next minor release tentative date: Week of 10th August, 2019

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-6.3 are listed below.

+
    +
  • #1679998: GlusterFS can be improved
  • +
  • #1683815: Memory leak when peer detach fails
  • +
  • #1716812: Failed to create volume which transport_type is "tcp,rdma"
  • +
  • #1716871: Image size as reported from the fuse mount is incorrect
  • +
  • #1718227: SELinux context labels are missing for newly added bricks using add-brick command
  • +
  • #1720633: Upcall: Avoid sending upcalls for invalid Inode
  • +
  • #1720635: Ganesha-gfapi logs are flooded with error messages related to "gf_uuid_is_null(gfid)) [Invalid argument]" when lookups are running from multiple clients
  • +
  • #1720993: tests/features/subdir-mount.t is failing for brick_mux regrssion
  • +
  • #1721105: Failed to create volume which transport_type is "tcp,rdma"
  • +
  • #1721783: ctime changes: tar still complains file changed as we read it if uss is enabled
  • +
  • #1722805: Healing not proceeding during in-service upgrade on a disperse volume
  • +
  • #1723658: [In-service] Post upgrade glusterd is crashing with a backtrace on the upgraded node while issuing gluster volume status from non-upgraded nodes
  • +
  • #1723659: ESTALE change in fuse breaks get_real_filename implementation
  • +
  • #1724210: Incorrect power of two calculation in mem_pool_get_fn
  • +
  • #1724558: [Ganesha]: truncate operation not updating the ctime
  • +
  • #1726294: DHT: severe memory leak in dht rename
  • +
  • #1726327: tests/features/subdir-mount.t is failing for brick_mux regrssion
  • +
  • #1727984: User serviceable snapshots (USS) are not accessible after changing transport.socket.bind-address of glusterd
  • +
  • #1728126: [In-service] Post upgrade glusterd is crashing with a backtrace on the upgraded node while issuing gluster volume status from non-upgraded nodes
  • +
  • #1729952: Deadlock when generating statedumps
  • +
\ No newline at end of file
diff --git a/release-notes/6.5/index.html b/release-notes/6.5/index.html
new file mode 100644
index 00000000..9f847dfb
--- /dev/null
+++ b/release-notes/6.5/index.html
@@ -0,0 +1,4531 @@

Release notes for Gluster 6.5

+

This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3 and 6.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Next minor release tentative date: Week of 30th October, 2019

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-6.4 are listed below.

+
    +
  • #1716848: DHT: directory permissions are wiped out
  • +
  • #1730545: gluster v geo-rep status command timing out
  • +
  • #1731509: snapd crashes sometimes
  • +
  • #1736341: potential deadlock while processing callbacks in gfapi
  • +
  • #1733880: [geo-rep]: gluster command not found while setting up a non-root session
  • +
  • #1733885: ctime: Upgrade/Enabling ctime feature wrongly updates older files with latest {a|m|c}time
  • +
  • #1737712: Unable to create geo-rep session on a non-root setup.
  • +
  • #1737745: ctime: When healing ctime xattr for legacy files, if multiple clients access and modify the same file, the ctime might be updated incorrectly.
  • +
  • #1737746: ctime: nfs client gets bad ctime for copied file which is on glusterfs disperse volume with ctime on
  • +
\ No newline at end of file
diff --git a/release-notes/6.6/index.html b/release-notes/6.6/index.html
new file mode 100644
index 00000000..515d159e
--- /dev/null
+++ b/release-notes/6.6/index.html
@@ -0,0 +1,4559 @@

Release notes for Gluster 6.6

+

This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4 and 6.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Next minor release tentative date: Week of 30th December, 2019

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-6.5 are listed below.

+
    +
  • #1726175: CentOs 6 GlusterFS client creates files with time 01/01/1970
  • +
  • #1737141: read() returns more than file size when using direct I/O
  • +
  • #1739320: The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
  • +
  • #1739335: Multiple disconnect events being propagated for the same child
  • +
  • #1739451: An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
  • +
  • #1740525: event: rename eventXXX with gf prefixed to avoid crash when apps linked libevent at the same time
  • +
  • #1741044: atime/mtime is not restored after healing for entry self heals
  • +
  • #1741402: READDIRP incorrectly updates posix-acl inode ctx
  • +
  • #1743219: glusterd start is failed and throwing an error Address already in use
  • +
  • #1743782: Windows client fails to copy large file to GlusterFS volume share with fruit and streams_xattr VFS modules via Samba
  • +
  • #1743988: Setting cluster.heal-timeout requires volume restart
  • +
  • #1745421: ./tests/bugs/glusterd/bug-1595320.t is failing
  • +
  • #1746118: capture stat failure error while setting the gfid
  • +
  • #1746138: ctime: If atime is updated via utimensat syscall ctime is not getting updated
  • +
  • #1749157: bug-1402841.t-mt-dir-scan-race.t fails spuriously
  • +
  • #1749307: Failures in remove-brick due to [Input/output error] errors
  • +
  • #1750228: [geo-rep]: Non-root - Unable to set up mountbroker root directory and group
  • +
  • #1751557: syncop: Bail out if frame creation fails
  • +
  • #1752413: ctime: Cannot see the "trusted.glusterfs.mdata" xattr for directory on a new brick after rebalance
  • +
  • #1753561: Custom xattrs are not healed on newly added brick
  • +
  • #1753571: interrupts leak memory
  • +
  • #1755679: Segmentation fault occurs while truncate file
  • +
  • #1755785: git clone fails on gluster volumes exported via nfs-ganesha
  • +
  • #1760361: packaging: remove leftover bd cruft in rpm .spec.in
  • +
  • #1760706: glustershd can not decide heald_sinks, and skip repair, so some entries lingering in volume heal info
  • +
  • #1760792: afr: support split-brain CLI for replica 3
  • +
  • #1761907: Rebalance causing IO Error - File descriptor in bad state
  • +
  • #1763028: [geo-rep] sync_method showing rsync instead of tarssh post in-service upgrade
  • +
  • #1764171: [Upgrade] Config files are not upgraded to new version
  • +
  • #1764172: geo-replication sessions going faulty
  • +
  • #1764174: geo-rep syncing significantly behind and also only one of the directories are synced with tracebacks seen
  • +
  • #1764176: geo-rep: Changelog archive file format is incorrect
  • +
  • #1764178: tests/geo-rep: Add test case to validate non-root geo-replication setup
  • +
  • #1764183: [GSS] geo-rep entering into faulty state with OSError: [Errno 13] Permission denied
  • +
  • #1765433: test: fix non-root geo-rep test case
  • +
\ No newline at end of file
diff --git a/release-notes/6.7/index.html b/release-notes/6.7/index.html
new file mode 100644
index 00000000..7a568fc7
--- /dev/null
+++ b/release-notes/6.7/index.html
@@ -0,0 +1,4543 @@

Release notes for Gluster 6.7

+

This is a bugfix release. The release notes for 6.0, 6.1, +6.2, 6.3, 6.4, 6.5 and 6.6 +contain a listing of all the new features that were added +and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Tentative date for next minor release: Week of 29th February, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

We have come across an issue where a client undergoing I/O crashes when a rebalance is running.

+

https://bugzilla.redhat.com/show_bug.cgi?id=1786983

+

Workaround: This issue can be avoided by stopping the I/O while rebalance is running.

+

Fix: The fix is ready and will be part of the next release, 6.8, which is expected around the 29th of February. https://review.gluster.org/#/c/glusterfs/+/23938/

+

Bugs addressed

+

Bugs addressed since release-6.6 are listed below.

+
    +
  • #1739446: [Disperse] : Client side heal is not removing dirty flag for some of the files.
  • +
  • #1739449: Disperse volume : data corruption with ftruncate data in 4+2 config
  • +
  • #1739450: Open fd heal should filter O_APPEND/O_EXCL
  • +
  • #1749625: [GlusterFS 6.1] GlusterFS brick process crash
  • +
  • #1766425: cgroup control-cpu-load.sh script not working
  • +
  • #1768726: Memory leak in glusterfsd process
  • +
  • #1770100: [geo-rep]: Geo-rep goes FAULTY with OSError
  • +
  • #1771842: [CENTOS 6] Geo-replication session not starting after creation
  • +
  • #1778182: glusterfsd crashed with "'MemoryError' Cannot access memory at address"
  • +
  • #1782495: GlusterFS brick process crash
  • +
  • #1784796: tests/00-geo-rep/00-georep-verify-non-root-setup.t fail on freshly installed builder
  • +
\ No newline at end of file
diff --git a/release-notes/6.8/index.html b/release-notes/6.8/index.html
new file mode 100644
index 00000000..2e67dd01
--- /dev/null
+++ b/release-notes/6.8/index.html
@@ -0,0 +1,4542 @@

Release notes for Gluster 6.8

+

This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6 and 6.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Tentative date for next minor release: Week of 30th April, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-6.7 are listed below.

+
    +
  • #1786754: Functionality to enable log rotation for user serviceable snapshot's logs.
  • +
  • #1786983: Rebalance is causing glusterfs crash on client node
  • +
  • #1789337: glusterfs process memory leak in ior test
  • +
  • #1790445: glusterfind pre output file is empty
  • +
  • #1790449: S57glusterfind-delete-post.py not python3 ready (does not decode bytestring)
  • +
  • #1790850: Remove extra argument
  • +
  • #1792857: Memory corruption when sending events to an IPv6 host
  • +
  • #1793096: gf_event doesn't work for glfsheal process
  • +
  • #1794020: Mounts fails after reboot of 1/3 gluster nodes
  • +
  • #1797985: Brick logs inundated with [2019-04-27 22:14:53.378047] I [dict.c:541:dict_get] (-->/usr/lib64/glusterfs/6.0/xlator/features/worm.so(+0x7241) [0x7fe857bb3241] -->/usr/lib64/glusterfs/6.0/xlator/features/locks.so(+0x1c219) [0x7fe857dda219] [Invalid argumen
  • +
  • #1804546: [Thin-arbiter] : Wait for connection with TA node before sending lookup/create of ta-replica id file
  • +
  • #1804594: Heal pending on volume, even after all the bricks are up
  • +
  • #1805097: Changes to self-heal logic w.r.t. detecting metadata split-brains
  • +
  • #1805671: Memory corruption when glfs_init() is called after glfs_fini()
  • +
  • #1806836: [EC] shd crashed while heal failed due to out of memory error.
  • +
  • #1806838: Disperse volume : Ganesha crash with IO in 4+2 config when one glusterfsd restart every 600s
  • +
  • #1807786: seeing error message in glustershd.log on volume start(or may be as part of shd graph regeneration) inet_pton failed with return code 0 [Invalid argument]
  • +
  • #1807793: glusterfs-libs: usage of inet_addr() may impact IPv6
  • +
\ No newline at end of file
diff --git a/release-notes/6.9/index.html b/release-notes/6.9/index.html
new file mode 100644
index 00000000..addc5f03
--- /dev/null
+++ b/release-notes/6.9/index.html
@@ -0,0 +1,4535 @@

Release notes for Gluster 6.9

+

This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7 and 6.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

+

NOTE: Tentative date for next minor release: Week of 30th June, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-6.8 are listed below.

+
    +
  • #832: Permission Denied in logs.
  • +
  • #1152: Spurious failure of tests/bugs/protocol/bug-1433815-auth-allow.t
  • +
  • #1140: getfattr returns ENOATTR for system.posix_acl_access on disperse type volumes
  • +
  • #884: [bug:1808688] Data corruption with asynchronous writes (please try to reproduce!)
  • +
  • #1134: snap_scheduler.py init failing with "TypeError: Can't mix strings and bytes in path components"
  • +
  • #1067: [bug:1661889] Metadata heal picks different brick each time as source if there are no pending xattrs.
  • +
  • #1028: [bug:1810934] Segfault in FUSE process, potential use after free
  • +
  • #1146: gfapi/Upcall: Potential deadlock in synctask threads processing upcall notifications
  • +
  • #1808966: Set volume option when one of the node is powered off, After powering the node brick processes are offline
  • +
  • #1809439: [brickmux]: glustershd crashed when rebooting 1/3 nodes at regular intervals
  • +
\ No newline at end of file
diff --git a/release-notes/7.0/index.html b/release-notes/7.0/index.html
new file mode 100644
index 00000000..4eddd285
--- /dev/null
+++ b/release-notes/7.0/index.html
@@ -0,0 +1,4909 @@

Release notes for Gluster 7.0

+

This is a major release that includes a range of code improvements and stability +fixes along with a few features as noted below.

+

A selection of the key features and changes are documented in this page. +A full list of bugs that have been addressed is included further below.

+ +

Announcements

+
    +
  1. +

    Releases that receive maintenance updates post release 7 are 5, 6 and 7 (reference)

    +
  2. +
  3. +

    Release 7 will receive maintenance updates around the 10th of every month for the first 3 months post release (i.e. Dec'19, Jan'20, Feb'20). Post the initial 3 months, it will receive maintenance updates every 2 months till EOL.

    +
  4. +
+

Major changes and features

+

Highlights

+
    +
  • Several stability fixes addressing,
  • +
  • coverity, clang-scan, address sanitizer and valgrind reported issues
  • +
  • removal of unused and hence, deprecated code and features
  • +
  • Performance Improvements
  • +
+

Features

+

1. Rpcbind not required in glusterd.service when gnfs isn't built.

+

2. Latency based read child to improve read workload latency in a cluster, especially in a cloud setup. It also provides load balancing based on the outstanding pending requests.

+

3. Glusterfind: integrate with gfid2path, to improve performance.

+

4. Issue #532: Work towards implementing global thread pooling has started

+

5. This release includes extra coverage for glfs public APIs in our regression tests, so we don't break anything.

+

6. Thin-arbiter integration with GD1
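As an illustrative sketch of the GD1 CLI form (the hostnames, brick paths and thin-arbiter path are placeholders; consult the thin-arbiter documentation for the exact syntax supported by your build):

# gluster volume create demo-vol replica 2 thin-arbiter 1 server1:/bricks/b1 server2:/bricks/b2 server3:/bricks/ta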

+

Major issues

+
    +
  • #1771308:Unable to build the gluster packages for centos-6
  • +
+

Note

+

Any new volumes created with the release will have the fips-mode-rchecksum volume option set to on by default.

+

If a client older than glusterfs-4.x (i.e. 3.x clients) accesses a volume which has the fips-mode-rchecksum volume option enabled, it can cause erroneous checksum computation and unwanted behaviour during afr self-heal. This option is to be enabled only when all clients are also >=4.x. So if you are using these older clients, please explicitly turn this option off.
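For clusters that must continue serving such older (3.x) clients, the option can be turned off explicitly, for example (the volume name is a placeholder):

# gluster volume set demo-vol fips-mode-rchecksum off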

+

Bugs addressed

+

Bugs addressed since release-6 are listed below.

+
    +
  • #789278: Issues reported by Coverity static analysis tool
  • +
  • #1098991: Dist-geo-rep: Invalid slave url (::: three or more colons) error out with unclear error message.
  • +
  • #1193929: GlusterFS can be improved
  • +
  • #1241494: [Backup]: Glusterfind CLI commands need to verify the accepted names for session/volume, before failing with error(s)
  • +
  • #1512093: Value of pending entry operations in detail status output is going up after each synchronization.
  • +
  • #1535511: Gluster CLI shouldn't stop if log file couldn't be opened
  • +
  • #1542072: Syntactical errors in hook scripts for managing SELinux context on bricks #2 (S10selinux-label-brick.sh + S10selinux-del-fcontext.sh)
  • +
  • #1573226: eventsapi: ABRT report for package glusterfs has reached 10 occurrences
  • +
  • #1580315: gluster volume status inode getting timed out after 30 minutes with no output/error
  • +
  • #1590385: Refactor dht lookup code
  • +
  • #1593224: [Disperse] : Client side heal is not removing dirty flag for some of the files.
  • +
  • #1596787: glusterfs rpc-clnt.c: error returned while attempting to connect to host: (null), port 0
  • +
  • #1622665: clang-scan report: glusterfs issues
  • +
  • #1624701: error-out {inode,entry}lk fops with all-zero lk-owner
  • +
  • #1628194: tests/dht: Additional tests for dht operations
  • +
  • #1633930: ASan (address sanitizer) fixes - Blanket bug
  • +
  • #1634664: Inconsistent quorum checks during open and fd based operations
  • +
  • #1635688: Keep only the valid (maintained/supported) components in the build
  • +
  • #1642168: changes to cloudsync xlator
  • +
  • #1642810: remove glupy from code and build
  • +
  • #1648169: Fuse mount would crash if features.encryption is on in the version from 3.13.0 to 4.1.5
  • +
  • #1648768: Tracker bug for all leases related issues
  • +
  • #1650095: Regression tests for geo-replication on EC volume is not available. It should be added.
  • +
  • #1651246: Failed to dispatch handler
  • +
  • #1651439: gluster-NFS crash while expanding volume
  • +
  • #1651445: [RFE] storage.reserve option should take size of disk as input instead of percentage
  • +
  • #1652887: Geo-rep help looks to have a typo.
  • +
  • #1654021: Gluster volume heal causes continuous info logging of "invalid argument"
  • +
  • #1654270: glusterd crashed with seg fault possibly during node reboot while volume creates and deletes were happening
  • +
  • #1659334: FUSE mount seems to be hung and not accessible
  • +
  • #1659708: Optimize by not stopping (restart) selfheal deamon (shd) when a volume is stopped unless it is the last volume
  • +
  • #1664934: glusterfs-fuse client not benefiting from page cache on read after write
  • +
  • #1670031: performance regression seen with smallfile workload tests
  • +
  • #1672480: Bugs Test Module tests failing on s390x
  • +
  • #1672711: Upgrade from glusterfs 3.12 to gluster 4/5 broken
  • +
  • #1672727: Fix timeouts so the tests pass on AWS
  • +
  • #1672851: With parallel-readdir enabled, deleting a directory containing stale linkto files fails with "Directory not empty"
  • +
  • #1674389: [thin arbiter] : rpm - add thin-arbiter package
  • +
  • #1674406: glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
  • +
  • #1674412: listing a file while writing to it causes deadlock
  • +
  • #1675076: [posix]: log the actual path wherever possible
  • +
  • #1676400: rm -rf fails with "Directory not empty"
  • +
  • #1676430: distribute: Perf regression in mkdir path
  • +
  • #1676736: tests: ./tests/bugs/distribute/bug-1161311.t times out
  • +
  • #1676797: server xlator doesn't handle dict unserialization failures correctly
  • +
  • #1677559: gNFS crashed when processing "gluster v profile [vol] info nfs"
  • +
  • #1678726: Integer Overflow possible in md-cache.c due to data type inconsistency
  • +
  • #1679401: Geo-rep setup creates an incorrectly formatted authorized_keys file
  • +
  • #1679406: glustereventsd does not start on Ubuntu 16.04 LTS
  • +
  • #1680587: Building RPM packages with _for_fedora_koji_builds enabled fails on el6
  • +
  • #1683352: remove experimental xlators informations from glusterd-volume-set.c
  • +
  • #1683594: nfs ltp ftest* fstat gets mismatch size as except after turn on md-cache
  • +
  • #1683816: Memory leak when peer detach fails
  • +
  • #1684385: [ovirt-gluster] Rolling gluster upgrade from 3.12.5 to 5.3 led to shard on-disk xattrs disappearing
  • +
  • #1684404: Multiple shd processes are running on brick_mux environmet
  • +
  • #1685027: Error handling in /usr/sbin/gluster-eventsapi produces IndexError: tuple index out of range
  • +
  • #1685120: upgrade from 3.12, 4.1 and 5 to 6 broken
  • +
  • #1685414: glusterd memory usage grows at 98 MB/h while running "gluster v profile" in a loop
  • +
  • #1685944: WORM-XLator: Maybe integer overflow when computing new atime
  • +
  • #1686371: Cleanup nigel access and document it
  • +
  • #1686398: Thin-arbiter minor fixes
  • +
  • #1686568: [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
  • +
  • #1686711: [Thin-arbiter] : send correct error code in case of failure
  • +
  • #1687326: [RFE] Revoke access from nodes using Certificate Revoke List in SSL
  • +
  • #1687705: Brick process has coredumped, when starting glusterd
  • +
  • #1687811: core dump generated while running the test ./tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t
  • +
  • #1688068: Proper error message needed for FUSE mount failure when /var is filled.
  • +
  • #1688106: Remove implementation of number of files opened in posix xlator
  • +
  • #1688116: Spurious failure in test ./tests/bugs/glusterfs/bug-844688.t
  • +
  • #1688287: ganesha crash on glusterfs with shard volume
  • +
  • #1689097: gfapi: provide an option for changing statedump path in glfs-api.
  • +
  • #1689799: [cluster/ec] : Fix handling of heal info cases without locks
  • +
  • #1689920: lots of "Matching lock not found for unlock xxx" when using disperse (ec) xlator
  • +
  • #1690753: Volume stop when quorum not met is successful
  • +
  • #1691164: glusterd leaking memory when issued gluster vol status all tasks continuosly
  • +
  • #1691616: client log flooding with intentional socket shutdown message when a brick is down
  • +
  • #1692093: Network throughput usage increased x5
  • +
  • #1692612: Locking issue when restarting bricks
  • +
  • #1692666: ssh-port config set is failing
  • +
  • #1693575: gfapi: do not block epoll thread for upcall notifications
  • +
  • #1693648: Geo-re: Geo replication failing in "cannot allocate memory"
  • +
  • #1693692: Increase code coverage from regression tests
  • +
  • #1694820: Geo-rep: Data inconsistency while syncing heavy renames with constant destination name
  • +
  • #1694925: GF_LOG_OCCASSIONALLY API doesn't log at first instance
  • +
  • #1695327: regression test fails with brick mux enabled.
  • +
  • #1696046: Log level changes do not take effect until the process is restarted
  • +
  • #1696077: Add pause and resume test case for geo-rep
  • +
  • #1696136: gluster fuse mount crashed, when deleting 2T image file from oVirt Manager UI
  • +
  • #1696512: glusterfs build is failing on rhel-6
  • +
  • #1696599: Fops hang when inodelk fails on the first fop
  • +
  • #1697316: Getting SEEK-2 and SEEK7 errors with [Invalid argument] in the bricks' logs
  • +
  • #1697486: bug-1650403.t && bug-858215.t are throwing error "No such file" at the time of access glustershd pidfile
  • +
  • #1697866: Provide a way to detach a failed node
  • +
  • #1697907: ctime feature breaks old client to connect to new server
  • +
  • #1697930: Thin-Arbiter SHD minor fixes
  • +
  • #1698078: ctime: Creation of tar file on gluster mount throws warning "file changed as we read it"
  • +
  • #1698449: thin-arbiter lock release fixes
  • +
  • #1699025: Brick is not able to detach successfully in brick_mux environment
  • +
  • #1699176: rebalance start command doesn't throw up error message if the command fails
  • +
  • #1699189: fix truncate lock to cover the write in tuncate clean
  • +
  • #1699339: With 1800+ vol and simultaneous 2 gluster pod restarts, running gluster commands gives issues once all pods are up
  • +
  • #1699394: [geo-rep]: Geo-rep goes FAULTY with OSError
  • +
  • #1699866: I/O error on writes to a disperse volume when replace-brick is executed
  • +
  • #1700078: disablle + reenable of bitrot leads to files marked as bad
  • +
  • #1700865: FUSE mount seems to be hung and not accessible
  • +
  • #1701337: issues with 'building' glusterfs packages if we do 'git clone --depth 1'
  • +
  • #1701457: ctime: Logs are flooded with "posix set mdata failed, No ctime" error during open
  • +
  • #1702131: The source file is left in EC volume after rename when glusterfsd out of service
  • +
  • #1702185: coredump reported by test ./tests/bugs/glusterd/bug-1699339.t
  • +
  • #1702299: Custom xattrs are not healed on newly added brick
  • +
  • #1702303: Enable enable fips-mode-rchecksum for new volumes by default
  • +
  • #1702952: remove tier related information from manual pages
  • +
  • #1703020: The cluster.heal-timeout option is unavailable for ec volume
  • +
  • #1703629: statedump is not capturing info related to glusterd
  • +
  • #1703948: Self-heal daemon resources are not cleaned properly after a ec fini
  • +
  • #1704252: Creation of bulkvoldict thread logic is not correct while brick_mux is enabled for single volume
  • +
  • #1704888: delete the snapshots and volume at the end of uss.t
  • +
  • #1705865: VM stuck in a shutdown because of a pending fuse request
  • +
  • #1705884: Image size as reported from the fuse mount is incorrect
  • +
  • #1706603: Glusterfsd crashing in ec-inode-write.c, in GF_ASSERT
  • +
  • #1707081: Self heal daemon not coming up after upgrade to glusterfs-6.0-2 (intermittently) on a brick mux setup
  • +
  • #1707700: maintain consistent values across for options when fetched at cluster level or volume level
  • +
  • #1707728: geo-rep: Sync hangs with tarssh as sync-engine
  • +
  • #1707742: tests/geo-rep: arequal checksum comparison always succeeds
  • +
  • #1707746: AFR-v2 does not log before attempting data self-heal
  • +
  • #1708051: Capture memory consumption for gluster process at the time of throwing no memory available message
  • +
  • #1708156: ec ignores lock contention notifications for partially acquired locks
  • +
  • #1708163: tests: fix bug-1319374.c compile warnings.
  • +
  • #1708926: Invalid memory access while executing cleanup_and_exit
  • +
  • #1708929: Add more test coverage for shd mux
  • +
  • #1709248: [geo-rep]: Non-root - Unable to set up mountbroker root directory and group
  • +
  • #1709653: geo-rep: With heavy rename workload geo-rep log if flooded
  • +
  • #1710054: Optimize the glustershd manager to send reconfigure
  • +
  • #1710159: glusterd: While upgrading (3-node cluster) 'gluster v status' times out on node to be upgraded
  • +
  • #1711240: [GNFS] gf_nfs_mt_inode_ctx serious memory leak
  • +
  • #1711250: bulkvoldict thread is not handling all volumes while brick multiplex is enabled
  • +
  • #1711297: Optimize glusterd code to copy dictionary in handshake code path
  • +
  • #1711764: Files inaccessible if one rebalance process is killed in a multinode volume
  • +
  • #1711820: Typo in cli return string.
  • +
  • #1711827: test case bug-1399598-uss-with-ssl.t is generating crash
  • +
  • #1712322: Brick logs inundated with [2019-04-27 22:14:53.378047] I [dict.c:541:dict_get] (-->/usr/lib64/glusterfs/6.0/xlator/features/worm.so(+0x7241) [0x7fe857bb3241] -->/usr/lib64/glusterfs/6.0/xlator/features/locks.so(+0x1c219) [0x7fe857dda219] [Invalid argumen
  • +
  • #1712668: Remove-brick shows warning cluster.force-migration enabled where as cluster.force-migration is disabled on the volume
  • +
  • #1712741: glusterd_svcs_stop should call individual wrapper function to stop rather than calling the glusterd_svc_stop
  • +
  • #1713730: Failure when glusterd is configured to bind specific IPv6 address. If bind-address is IPv6, *addr_len will be non-zero and it goes to ret = -1 branch, which will cause listen failure eventually
  • +
  • #1714098: Make debugging hung frames easier
  • +
  • #1714415: Script to make it easier to find hung frames
  • +
  • #1714973: upgrade after tier code removal results in peer rejection.
  • +
  • #1715921: uss.t tests times out with brick-mux regression
  • +
  • #1716695: Fix memory leaks that are present even after an xlator fini [client side xlator]
  • +
  • #1716766: [Thin-arbiter] TA process is not picking 24007 as port while starting up
  • +
  • #1716812: Failed to create volume which transport_type is "tcp,rdma"
  • +
  • #1716830: DHT: directory permissions are wiped out
  • +
  • #1717757: WORM: Segmentation Fault if bitrot stub do signature
  • +
  • #1717782: gluster v get all still showing storage.fips-mode-rchecksum off
  • +
  • #1717819: Changes to self-heal logic w.r.t. detecting metadata split-brains
  • +
  • #1717953: SELinux context labels are missing for newly added bricks using add-brick command
  • +
  • #1718191: Regression: Intermittent test failure for quick-read-with-upcall.t
  • +
  • #1718273: markdown formatting errors in files present under /doc directory of the project
  • +
  • #1718316: Ganesha-gfapi logs are flooded with error messages related to "gf_uuid_is_null(gfid)) [Invalid argument]" when lookups are running from multiple clients
  • +
  • #1718338: Upcall: Avoid sending upcalls for invalid Inode
  • +
  • #1718848: False positive logging of mount failure
  • +
  • #1718998: Fix test case "tests/basic/afr/split-brain-favorite-child-policy.t" failure
  • +
  • #1720201: Healing not proceeding during in-service upgrade on a disperse volume
  • +
  • #1720290: ctime changes: tar still complains file changed as we read it if uss is enabled
  • +
  • #1720615: [RHEL-8.1] yum update fails for rhel-8 glusterfs client packages 6.0-5.el8
  • +
  • #1720993: tests/features/subdir-mount.t is failing for brick_mux regrssion
  • +
  • #1721385: glusterfs-libs: usage of inet_addr() may impact IPv6
  • +
  • #1721435: DHT: Internal xattrs visible on the mount
  • +
  • #1721441: geo-rep: Fix permissions for GEOREP_DIR in non-root setup
  • +
  • #1721601: [SHD] : logs of one volume are going to log file of other volume
  • +
  • #1722541: stale shd process files leading to heal timing out and heal deamon not coming up for all volumes
  • +
  • #1703322: Need to document about fips-mode-rchecksum in gluster-7 release notes.
  • +
  • #1722802: Incorrect power of two calculation in mem_pool_get_fn
  • +
  • #1723890: Crash in glusterd when running test script bug-1699339.t
  • +
  • #1728770: Failures in remove-brick due to [Input/output error] errors
  • +
  • #1736481: capture stat failure error while setting the gfid
  • +
  • #1739424: Disperse volume : data corruption with ftruncate data in 4+2 config
  • +
  • #1739426: Open fd heal should filter O_APPEND/O_EXCL
  • +
  • #1739427: An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
  • +
  • #1741041: atime/mtime is not restored after healing for entry self heals
  • +
  • #1743200: ./tests/bugs/glusterd/bug-1595320.t is failing
  • +
  • #1744874: interrupts leak memory
  • +
  • #1745422: ./tests/bugs/glusterd/bug-1595320.t is failing
  • +
  • #1745914: ESTALE change in fuse breaks get_real_filename implementation
  • +
  • #1746142: ctime: If atime is updated via utimensat syscall ctime is not getting updated
  • +
  • #1746145: CentOs 6 GlusterFS client creates files with time 01/01/1970
  • +
  • #1747301: Setting cluster.heal-timeout requires volume restart
  • +
  • #1747746: The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
  • +
  • #1748448: syncop: Bail out if frame creation fails
  • +
  • #1748774: Incorrect power of two calculation in mem_pool_get_fn
  • +
  • #1749155: bug-1402841.t-mt-dir-scan-race.t fails spuriously
  • +
  • #1749305: Failures in remove-brick due to [Input/output error] errors
  • +
  • #1749664: The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
  • +
  • #1751556: syncop: Bail out if frame creation fails
  • +
  • #1752245: Crash in glusterd when running test script bug-1699339.t
  • +
  • #1752429: Ctime: Cannot see the "trusted.glusterfs.mdata" xattr for directory on a new brick after rebalance
  • +
  • #1755212: geo-rep: performance improvement while syncing heavy renames with existing destination
  • +
  • #1755213: geo-rep: non-root session going fault due improper sub-command
  • +
  • #1755678: Segmentation fault occurs while truncate file
  • +
  • #1756002: git clone fails on gluster volumes exported via nfs-ganesha
  • +
\ No newline at end of file
diff --git a/release-notes/7.1/index.html b/release-notes/7.1/index.html
new file mode 100644
index 00000000..520887f0
--- /dev/null
+++ b/release-notes/7.1/index.html
@@ -0,0 +1,4602 @@

Release notes for Gluster 7.1

+

This is a bugfix release. The release notes for 7.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE: Next minor release tentative date: Week of 20th Jan, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Note

+

GlusterFS-Samba

+

The following parameters will be added to the GlusterFS volume share section (if not present) in smb.conf when the user.smb or user.cifs option is set on a volume:

+
+

kernel share modes = no

+
+

The following parameters will NOT be added to the GlusterFS volume share section (if not present) in smb.conf when the user.smb or user.cifs option is set on a volume:

+
+

guest ok = yes

+
+
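For illustration, a minimal sketch of this behaviour follows; the volume name myvol is hypothetical, and the exact share parameters written by the Samba hook scripts can differ between versions and packagings:

    # Export a (hypothetical) volume named "myvol" over SMB; setting user.smb
    # (or user.cifs) triggers the Samba hook scripts that edit smb.conf.
    gluster volume set myvol user.smb enable

    # The hook scripts add a share section to /etc/samba/smb.conf which now
    # includes, among other parameters:
    #   [gluster-myvol]
    #   kernel share modes = no
    #
    # "guest ok = yes" is no longer written by default; add it manually only
    # if anonymous access to the share is intended.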

Bugs addressed

+

Bugs addressed since release-7.0 are listed below.

+
    +
  • #1760356: packaging: remove leftover bd cruft in rpm .spec.in
  • +
  • #1760699: glustershd can not decide heald_sinks, and skip repair, so some entries lingering in volume heal info
  • +
  • #1760791: afr: support split-brain CLI for replica 3
  • +
  • #1761910: Rebalance causing IO Error - File descriptor in bad state
  • +
  • #1764003: [Upgrade] Config files are not upgraded to new version
  • +
  • #1764007: geo-replication sessions going faulty
  • +
  • #1764015: geo-rep syncing significantly behind and also only one of the directories are synced with tracebacks seen
  • +
  • #1764023: geo-rep: Changelog archive file format is incorrect
  • +
  • #1764026: tests/geo-rep: Add test case to validate non-root geo-replication setup
  • +
  • #1764028: [geo-rep] sync_method showing rsync instead of tarssh post in-service upgrade
  • +
  • #1764030: [GSS] geo-rep entering into faulty state with OSError: [Errno 13] Permission denied
  • +
  • #1765431: test: fix non-root geo-rep test case
  • +
  • #1766424: cgroup control-cpu-load.sh script not working
  • +
  • #1768742: Memory leak in glusterfsd process
  • +
  • #1768760: tests/bugs/shard/unlinks-and-renames.t fails on RHEL8
  • +
  • #1769315: Rebalance is causing glusterfs crash on client node
  • +
  • #1769320: Spurious failure tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
  • +
  • #1771840: [CENTOS 6] Geo-replication session not starting after creation
  • +
  • #1775495: [GNFS] showmout -a cause gnfs crash
  • +
  • #1777769: auth-allow of IPv4 address doesn't take netmask into consideration
  • +
  • #1778175: glusterfsd crashed with "'MemoryError' Cannot access memory at address"
  • +
  • #1781483: Remove guest access by default for GlusterFS volume SMB shares added by hook scripts
  • +
  • #1781486: gluster-smb:glusto-test access gluster by cifs test write report Device or resource busy
  • +
  • #1782826: event_slot_alloc not able to return index after reach slot_used count to 1024
  • +
  • #1783227: GlusterFS brick process crash
  • +
  • #1783858: Heal Info is hung when I/O is in progress on a gluster block volume
  • +
  • #1784790: tests/00-geo-rep/00-georep-verify-non-root-setup.t fail on freshly installed builder
  • +
  • #1785228: Windows client fails to copy large file to GlusterFS volume share with fruit and streams_xattr VFS modules via Samba
  • +
  • #1785493: READDIRP incorrectly updates posix-acl inode ctx
  • +
\ No newline at end of file
diff --git a/release-notes/7.2/index.html b/release-notes/7.2/index.html
new file mode 100644
index 00000000..e3fffb84
--- /dev/null
+++ b/release-notes/7.2/index.html
@@ -0,0 +1,4531 @@

Release notes for Gluster 7.2

+

This is a bugfix release. The release notes for 7.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE: Next minor release tentative date: Week of 20th Feb, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-7.1 are listed below.

+
    +
  • #1767264: glusterfs client process coredump
  • +
  • #1786753: Functionality to enable log rotation for user serviceable snapshot's logs.
  • +
  • #1788785: Unable to set/modify optimistic-change-log for replicate volumes
  • +
  • #1789336: glusterfs process memory leak in ior test
  • +
  • #1790423: Glusterfind pre command fails
  • +
  • #1790428: glusterfind pre output file is empty
  • +
  • #1790438: S57glusterfind-delete-post.py not python3 ready (does not decode bytestring)
  • +
  • #1790846: Remove extra argument
  • +
\ No newline at end of file
diff --git a/release-notes/7.3/index.html b/release-notes/7.3/index.html
new file mode 100644
index 00000000..b40312f9
--- /dev/null
+++ b/release-notes/7.3/index.html
@@ -0,0 +1,4533 @@

Release notes for Gluster 7.3

+

This is a bugfix release. The release notes for 7.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE: Next minor release tentative date: Week of 20th Mar, 2020

+

Major changes, features and limitations addressed in this release

+

Features

+

Make the thin-arbiter name unique in the 'pending-xattr' option. By making this unique, a single thin-arbiter node can serve multiple clusters.

+
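A hedged sketch of what this enables follows, assuming the documented thin-arbiter create syntax (replica 2 thin-arbiter 1 ...); whether the CLI accepts it depends on the build, and all host names, volume names and brick paths below are hypothetical:

    # Cluster A creates a replica 2 volume backed by a thin-arbiter brick:
    gluster volume create vol-a replica 2 thin-arbiter 1 \
        a-node1:/bricks/a1 a-node2:/bricks/a2 ta-host:/bricks/ta

    # Cluster B can point at the same thin-arbiter host, because the
    # per-volume 'pending-xattr' name is now unique:
    gluster volume create vol-b replica 2 thin-arbiter 1 \
        b-node1:/bricks/b1 b-node2:/bricks/b2 ta-host:/bricks/ta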

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-7.2 are listed below.

+
    +
  • #1768407: glusterfsd memory leak observed after enable tls
  • +
  • #1791154: xlators/features/quota/src/quota.c:quota_log_usage
  • +
  • #1793085: gf_event doesn't work for glfsheal process
  • +
  • #1793412: config ssh-port can accept negative and outside allowed port range value
  • +
  • #1793492: cli: duplicate defns of cli_default_conn_timeout and cli_ten_minutes_timeout
  • +
  • #1794019: Mounts fails after reboot of 1/3 gluster nodes
  • +
  • #1795540: mem leak while using gluster tools
  • +
  • #1802449: spurious self-heald.t failure
  • +
\ No newline at end of file
diff --git a/release-notes/7.4/index.html b/release-notes/7.4/index.html
new file mode 100644
index 00000000..858753b5
--- /dev/null
+++ b/release-notes/7.4/index.html
@@ -0,0 +1,4533 @@

Release notes for Gluster 7.4

+

This is a bugfix release. The release notes for 7.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE: Next minor release tentative date: Week of 20th Apr, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-7.3 are listed below.

+
    +
  • #1785323: glusterfsd crashes after a few seconds
  • +
  • #1804591: Heal pending on volume, even after all the bricks are up
  • +
  • #1805668: Memory corruption when glfs_init() is called after glfs_fini()
  • +
  • #1806843: Disperse volume : Ganesha crash with IO in 4+2 config when one glusterfsd restarts every 600s
  • +
  • #1807785: seeing error message in glustershd.log on volume start(or may be as part of shd graph regeneration) inet_pton failed with return code 0 [Invalid argument]
  • +
  • #1808964: Set volume option when one of the nodes is powered off, After powering the node brick processes are offline
  • +
  • #1809438: [brickmux]: glustershd crashed when rebooting 1/3 nodes at regular intervals
  • +
  • #1812849: Setting volume option when one of the glusterds is stopped in the cluster, post glusterd restart seeing couldn't find vol info in glusterd logs and shd, brick process offline
  • +
  • #1061: [EC] shd crashed while heal failed due to out of memory error.
  • +
  • #1030: Memory corruption when sending events to an IPv6 host
  • +
\ No newline at end of file
diff --git a/release-notes/7.5/index.html b/release-notes/7.5/index.html
new file mode 100644
index 00000000..051c22cc
--- /dev/null
+++ b/release-notes/7.5/index.html
@@ -0,0 +1,4530 @@

Release notes for Gluster 7.5

+

This is a bugfix release. The release notes for 7.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE: Next minor release tentative date: Week of 20th May, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-7.4 are listed below.

+
    +
  • #832 Permission Denied in logs
  • +
  • #884 [bug:1808688] Data corruption with asynchronous writes
  • +
  • #1067 [bug:1661889] Metadata heal picks different brick each time as source if there are no pending xattrs.
  • +
  • #1127 Mount crash during background shard cleanup
  • +
  • #1134 snap_scheduler.py init failing with "TypeError: Can't mix strings and bytes in path components"
  • +
  • #1152 Spurious failure of tests/bugs/protocol/bug-1433815-auth-allow.t
  • +
  • #1168 glusterfsd crash due to health-check failed, going down ,system call errorno not return
  • +
\ No newline at end of file
diff --git a/release-notes/7.6/index.html b/release-notes/7.6/index.html
new file mode 100644
index 00000000..05f817bc
--- /dev/null
+++ b/release-notes/7.6/index.html
@@ -0,0 +1,4529 @@

Release notes for Gluster 7.6

+

This is a bugfix release. The release notes for 7.0, 7.1, 7.2, 7.3, 7.4 and 7.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE: Next minor release tentative date: Week of 20th Jul, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-7.5 are listed below.

+
    +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
  • #1140 getfattr returns ENOATTR for system.posix_acl_access on dispe...
  • +
  • #1146 gfapi/Upcall: Potential deadlock in synctask threads processi...
  • +
  • #1179 gnfs split brain when 1 server in 3x1 down (high load)
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
\ No newline at end of file
diff --git a/release-notes/7.7/index.html b/release-notes/7.7/index.html
new file mode 100644
index 00000000..0fd72c8c
--- /dev/null
+++ b/release-notes/7.7/index.html
@@ -0,0 +1,4533 @@

Release notes for Gluster 7.7

+

This is a bugfix release. The release notes for 7.0, 7.1, 7.2, 7.3, 7.4, 7.5 and 7.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE: Next minor release tentative date: Week of 20th Sep, 2020

+

Major changes, features and limitations addressed in this release

+

None

+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-7.6 are listed below.

+
    +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #1220 cluster/ec: return correct error code and log the message in ...
  • +
  • #1223 Failure of tests/basic/gfapi/gfapi-copy-file-range.t
  • +
  • #1225 fuse causes glusterd to dump core
  • +
  • #1243 Modify and return iatt (especially size and block-count) in s...
  • +
  • #1254 Prioritize ENOSPC over other lesser priority errors
  • +
  • #1296 Implement seek in open-behind
  • +
  • #1303 Failures in rebalance due to [Input/output error]
  • +
  • #1348 Fuse mount crashes in shard translator when truncating a *real...
  • +
\ No newline at end of file
diff --git a/release-notes/7.8/index.html b/release-notes/7.8/index.html
new file mode 100644
index 00000000..e329e466
--- /dev/null
+++ b/release-notes/7.8/index.html
@@ -0,0 +1,4541 @@

Release notes for Gluster 7.8

+

This is a bugfix release. The release notes for 7.0, 7.1, 7.2, 7.3, 7.4, 7.5, 7.6 and 7.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE:

+
    +
  1. Next minor release tentative date: Week of 30th Nov, 2020.
  2. The next minor release will be the last release of the release-7.x series.
+

Highlights of Release

+

This release mainly contains bug fixes, as described in the issues section below.

+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/7/7.8/

+

Issues addressed in this release

+

Please find the list of issues added to this release below.

+
    +
  • #763 thin-arbiter: Testing report
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #1002 [bug:1679998] GlusterFS can be improved
  • +
  • #1250 geo-rep: Fix corner case in rename on mkdir during hybrid crawl
  • +
  • #1253 On Ovirt setup glusterfs performs poorly
  • +
  • #1332 Unable to Upgrade to Gluster 7 from Earlier Version
  • +
  • #1351 issue with gf_fill_iatt_for_dirent()
  • +
  • #1354 High CPU utilization by self-heal on disperse volumes with no ...
  • +
  • #1385 High CPU utilization by self-heal on disperse volumes when an ...
  • +
  • #1407 glusterd keep crashing when upgrading from 6.5 to 7.7
  • +
  • #1438 syncdaemon/syncdutils.py: SyntaxWarning: "is" with a literal. ...
  • +
  • #1440 glusterfs 7.7 fuse client memory leak
  • +
  • #1472 Readdir-ahead leads to inconsistent ls results
  • +
\ No newline at end of file
diff --git a/release-notes/7.9/index.html b/release-notes/7.9/index.html
new file mode 100644
index 00000000..fc757893
--- /dev/null
+++ b/release-notes/7.9/index.html
@@ -0,0 +1,4534 @@

Release notes for Gluster 7.9

+

This is a bugfix release. The release notes for 7.0, 7.1, 7.2, 7.3, 7.4, 7.5, 7.6, 7.7 and 7.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

+

NOTE:

+

This is the last release of the release-7.x series. Users are highly encouraged to upgrade to newer releases of GlusterFS.

+

Highlights of Release

+

This release mainly contains bug fixes, as described in the issues section below.

+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/7/7.9/

+

Issues addressed in this release

+

Please find the list of issues added to this release below.

+
    +
  • #1852 glusterd: Can't run rebalance due to long unix socket
  • +
  • #1836 posix: Update ret value in posix_get_gfid2path if GF_MALLOC fails
  • +
  • #1738 [cli] Improper error message on command timeout
  • +
  • #1699 One brick offline with signal received: 11
  • +
  • #1604 rfc.sh on release-7 needs to move to github flow
  • +
  • #1499 why not use JumpConsistentHash to replace SuperFastHash to cho...
  • +
  • #1221 features/bit-rot: invalid snprintf() buffer size
  • +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
\ No newline at end of file
diff --git a/release-notes/8.0/index.html b/release-notes/8.0/index.html
new file mode 100644
index 00000000..ab65643f
--- /dev/null
+++ b/release-notes/8.0/index.html
@@ -0,0 +1,4932 @@

Release notes for Gluster 8.0

+

Release date: 09-July-2020

+

This is a major release that includes a range of features, code improvements and stability fixes as noted below.

+

A selection of the key features and changes is documented on this page. A full list of bugs that have been addressed is included further below.

+ +

Announcements

+
  1. Releases that receive maintenance updates post release 8 are 7 and 8 (reference)
  2. Release 8 will receive maintenance updates around the 10th of every month for the first 3 months post release (i.e. Aug'20, Sep'20, Oct'20). After the initial 3 months, it will receive maintenance updates every 2 months till EOL.

Major changes and features

+

Highlights

+
    +
  • Several stability fixes addressing
  • +
  • issues reported by Coverity, clang-scan, address sanitizer and valgrind
  • +
  • removal of unused (and hence deprecated) code and features
  • +
  • Performance Improvements
  • +
  • CentOS 8 and RHEL 8 are supported
  • +
+

Features

+
    +
  • Implemented seek file operation for open-behind
  • +
  • The storage.reserve option now takes an absolute disk size as input instead of a percentage (see the example after this list)
  • +
  • Added functionality to enable log rotation for user-serviceable snapshots' logs
  • +
  • Mandatory locks enhancements in replicate subvolumes
  • +
  • Added an option to build with the tcmalloc library, to validate memory allocation implementations other than libc's malloc
  • +
  • Integrated Thin-arbiter with GD1
  • +
  • Client Handling of Elastic Clusters
  • +
  • The package glusterfs-libs is replaced by libgfchangelog0, libgfrpc0, libgfxdr0, and libglusterfs0; and additional libraries in libgfapi0, libglusterd0
  • +
+
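To illustrate the storage.reserve change called out above, a minimal sketch follows; the volume name myvol and the value are hypothetical, and the exact unit syntax accepted should be confirmed with gluster volume set help on your build:

    # Reserve an absolute amount of disk space instead of a percentage:
    gluster volume set myvol storage.reserve 50GB

    # Check the value currently in effect:
    gluster volume get myvol storage.reserve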

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-7 are listed below.

+
    +
  • #789278: Issues reported by Coverity static analysis tool
  • +
  • #1158130: Not possible to disable fopen-keeo-cache when mounting
  • +
  • #1183054: rpmlint throws couple of errors for RPM spec file
  • +
  • #1193929: GlusterFS can be improved
  • +
  • #1387404: geo-rep: gsync-sync-gfid binary installed in /usr/share/...
  • +
  • #1410439: glusterfind pre output file is empty
  • +
  • #1423442: group files to set volume options should have comments
  • +
  • #1430623: pthread mutexes and condition variables are not destroyed
  • +
  • #1489610: glusterfind saves var data under $prefix instead of localstatedir
  • +
  • #1507896: glfs_init returns incorrect errno on faliure
  • +
  • #1514683: Removal of bricks in volume isn't prevented if remaining brick doesn't contain all the files
  • +
  • #1538900: Found a missing unref in rpc_clnt_reconnect
  • +
  • #1554286: Xattr not updated if increasing the retention of a WORM/Retained file
  • +
  • #1593542: ctime: Upgrade/Enabling ctime feature wrongly updates older files with latest {a|m|c}time
  • +
  • #1620580: Deleted a volume and created a new volume with similar but not the same name. The kubernetes pod still keeps on running and doesn't crash. Still possible to write to gluster mount
  • +
  • #1622665: clang-scan report: glusterfs issues
  • +
  • #1626543: dht/tests: Create a .t to test all possible combinations for file rename
  • +
  • #1635688: Keep only the valid (maintained/supported) components in the build
  • +
  • #1636297: Make it easy to build / host a project which just builds glusterfs translator
  • +
  • #1644322: flooding log with "glusterfs-fuse: read from /dev/fuse returned -1 (Operation not permitted)"
  • +
  • #1651445: [RFE] storage.reserve option should take size of disk as input instead of percentage
  • +
  • #1664335: [geo-rep]: Transport endpoint not connected with arbiter volumes
  • +
  • #1665358: allow regression to not run tests with nfs, if nfs is disabled.
  • +
  • #1668239: [man page] Gluster(8) - Missing disperse-data parameter Gluster Console Manager man page
  • +
  • #1668286: READDIRP incorrectly updates posix-acl inode ctx
  • +
  • #1676479: read-ahead and io-cache degrading performance on sequential read
  • +
  • #1688115: Data heal not checking for locks on source & sink(s) before healing
  • +
  • #1689097: gfapi: provide an option for changing statedump path in glfs-api.
  • +
  • #1690454: mount-shared-storage.sh does not implement mount options
  • +
  • #1693692: Increase code coverage from regression tests
  • +
  • #1694920: Inconsistent locking in presence of disconnects
  • +
  • #1697293: DHT: print hash and layout values in hexadecimal format in the logs
  • +
  • #1698042: quick-read cache invalidation feature has the same key of md-cache
  • +
  • #1707731: [Upgrade] Config files are not upgraded to new version
  • +
  • #1708603: [geo-rep]: Note section in document is required for ignore_deletes true config option where it might delete a file
  • +
  • #1708929: Add more test coverage for shd mux
  • +
  • #1716695: Fix memory leaks that are present even after an xlator fini [client side xlator]
  • +
  • #1716979: Multiple disconnect events being propagated for the same child
  • +
  • #1717754: Enable features.locks-notify-contention by default
  • +
  • #1717824: Fencing: Added the tcmu-runner ALUA feature support but after one of node is rebooted the glfs_file_lock() get stucked
  • +
  • #1717827: tests/geo-rep: Add test case to validate non-root geo-replication setup
  • +
  • #1719290: Glusterfs mount helper script not working with IPv6 because of regular expression or man is wrong
  • +
  • #1720463: [Thin-arbiter] : Wait for connection with TA node before sending lookup/create of ta-replica id file
  • +
  • #1720566: Can't rebalance GlusterFS volume because unix socket's path name is too long
  • +
  • #1721590: tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t is failing
  • +
  • #1721686: Remove usage of obsolete function usleep()
  • +
  • #1722507: Incorrect reporting of type/gfid mismatch
  • +
  • #1722541: stale shd process files leading to heal timing out and heal deamon not coming up for all volumes
  • +
  • #1722546: do not assert in inode_unref if the inode table cleanup has started
  • +
  • #1722598: dump the min and max latency of each xlator in statedump
  • +
  • #1722698: DHT: severe memory leak in dht rename
  • +
  • #1722740: [GSS] geo-replication sessions going faulty
  • +
  • #1722802: Incorrect power of two calculation in mem_pool_get_fn
  • +
  • #1722977: ESTALE change in fuse breaks get_real_filename implementation
  • +
  • #1723280: windows cannot access mountpoint exportd from a disperse volume
  • +
  • #1723402: Brick multiplexing is not working.
  • +
  • #1723455: volume set group description missing space leading to words being merged in help output
  • +
  • #1723658: [In-service] Post upgrade glusterd is crashing with a backtrace on the upgraded node while issuing gluster volume status from non-upgraded nodes
  • +
  • #1723761: [Ganesha]: truncate operation not updating the ctime
  • +
  • #1723890: Crash in glusterd when running test script bug-1699339.t
  • +
  • #1724024: use more secure mode for mkdir operations
  • +
  • #1724184: Thin-arbiter: SHD takes lock and inspects the state on TA during every index crawl
  • +
  • #1725034: gluster volume help showing multiple commands for top instead of one.
  • +
  • #1725211: User serviceable snapshots (USS) are not accessible after changing transport.socket.bind-address of glusterd
  • +
  • #1726205: Windows client fails to copy large file to GlusterFS volume share with fruit and streams_xattr VFS modules via Samba
  • +
  • #1726783: snapd crashes sometimes
  • +
  • #1726906: get-state does not show correct brick status
  • +
  • #1727068: Deadlock when generating statedumps
  • +
  • #1727081: Disperse volume : data corruption with ftruncate data in 4+2 config
  • +
  • #1727107: geo-replication/setup.py missing license details in setup()
  • +
  • #1727248: [GNFS] showmout -a cause gnfs crash
  • +
  • #1727256: Directory pending heal in heal info output
  • +
  • #1727329: glustershd dumped core with seg fault at afr_has_quorum
  • +
  • #1727852: gluster-block: improvements to volume group profile options list
  • +
  • #1728047: interrupts leak memory
  • +
  • #1728417: Cleanup references to Hadoop in code base
  • +
  • #1728554: Spelling errors
  • +
  • #1728683: [geo-rep] gluster-mountbroker missing a brief description of what the argument does in # gluster-mountbroker (add|remove|setup) --help
  • +
  • #1728766: Volume start failed when shd is down in one of the node in cluster
  • +
  • #1728770: Failures in remove-brick due to [Input/output error] errors
  • +
  • #1729085: [EC] shd crashed while heal failed due to out of memory error.
  • +
  • #1729107: Memory leak in glusterfsd process
  • +
  • #1729463: gluster v geo-rep status command timing out
  • +
  • #1729772: Disperse volume : Ganesha crash with IO in 4+2 config when one glusterfsd restart every 600s
  • +
  • #1729847: Fix spurious failure of tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
  • +
  • #1730175: Seeing failure due to "getxattr err for dir [No data available]" in rebalance
  • +
  • #1730409: core file generated - when EC volume stop and start is executed for 10 loops on a EC+Brickmux setup
  • +
  • #1730715: An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
  • +
  • #1730953: mount generates errors after umount
  • +
  • #1731920: [geo-rep]: gluster command not found while setting up a non-root session
  • +
  • #1732496: [Coverity] RETURN_LOCAL in __nlc_inode_ctx_get()
  • +
  • #1732717: fuse: Limit the number of inode invalidation requests in the queue
  • +
  • #1733042: cluster.rc Create separate logdirs for each host instance
  • +
  • #1733166: potential deadlock while processing callbacks in gfapi
  • +
  • #1733425: Setting volume option when one of the glusterd is stopped in the cluster, post glusterd restart seeing couldn't find vol info in glusterd logs and shd, brick process offline
  • +
  • #1733935: Open fd heal should filter O_APPEND/O_EXCL
  • +
  • #1734026: Cannot see the "trusted.glusterfs.mdata" xattr for directory on a new brick after rebalance
  • +
  • #1734252: Heal not completing after geo-rep session is stopped on EC volumes.
  • +
  • #1734299: ctime: When healing ctime xattr for legacy files, if multiple clients access and modify the same file, the ctime might be updated incorrectly.
  • +
  • #1734370: atime/mtime is not restored after healing for entry self heals
  • +
  • #1734738: Unable to create geo-rep session on a non-root setup.
  • +
  • #1736482: capture stat failure error while setting the gfid
  • +
  • #1737288: nfs client gets bad ctime for copied file which is on glusterfs disperse volume with ctime on
  • +
  • #1737291: features/locks: avoid use after freed of frame for blocked lock
  • +
  • #1737484: geo-rep syncing significantly behind and also only one of the directories are synced with tracebacks seen
  • +
  • #1737676: Upgrading a Gluster node fails when user edited glusterd.vol file exists
  • +
  • #1737778: ocf resource agent for volumes don't work in non-standard environment
  • +
  • #1738419: read() returns more than file size when using direct I/O
  • +
  • #1738763: [EC] : fix coverity issue
  • +
  • #1738786: ctime: If atime is updated via utimensat syscall ctime is not getting updated
  • +
  • #1739360: [GNFS] gluster crash with nfs.nlm off
  • +
  • #1740017: tests/bugs/replicate/bug-880898.t created a core file.
  • +
  • #1741734: gluster-smb:glusto-test access gluster by cifs test write report Device or resource busy
  • +
  • #1741779: Fix spelling errors
  • +
  • #1741890: geo-rep: Changelog archive file format is incorrect
  • +
  • #1743020: glusterd start is failed and throwing an error Address already in use
  • +
  • #1743069: bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t fails in brick mux regression spuriously
  • +
  • #1743094: glusterfs build fails on centos7
  • +
  • #1743200: ./tests/bugs/glusterd/bug-1595320.t is failing
  • +
  • #1743573: fuse client hung when issued a lookup "ls" on an ec volume
  • +
  • #1743652: CentOs 6 GlusterFS client creates files with time 01/01/1970
  • +
  • #1744519: log aio_error return codes in posix_fs_health_check
  • +
  • #1744548: Setting cluster.heal-timeout requires volume restart
  • +
  • #1745965: glusterd fails to start due to SIGABRT dumping core
  • +
  • #1745967: File size was not truncated for all files when tried with rebalance in progress.
  • +
  • #1746228: systemctl start glusterd is getting timed out on the scaled setup with 2000 volumes
  • +
  • #1746320: SHORT-WRITE error leads to crash
  • +
  • #1746810: markdown files containing 404 links
  • +
  • #1747746: The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
  • +
  • #1748448: syncop: Bail out if frame creation fails
  • +
  • #1748744: bug-1402841.t-mt-dir-scan-race.t fails spuriously
  • +
  • #1748836: Application should know when update size/version went bad
  • +
  • #1749322: glustershd can not decide heald_sinks, and skip repair, so some entries lingering in volume heal info
  • +
  • #1750387: Deprecated log rotate command still present in "# gluster v help"
  • +
  • #1750618: Cleanup of executable in tests/bugs/gfapi/bug-1447266/bug-1447266.t not done
  • +
  • #1751134: Spurious failure tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
  • +
  • #1751907: bricks gone down unexpectedly
  • +
  • #1752330: seeing error message in glustershd.log on volume start(or may be as part of shd graph regeneration) inet_pton failed with return code 0 [Invalid argument]
  • +
  • #1752331: Test tests/basic/volume-scale-shd-mux.t is failing on upstream CI
  • +
  • #1753569: git clone fails on gluster volumes exported via nfs-ganesha
  • +
  • #1753592: Segmentation fault occurs while truncate file
  • +
  • #1753843: [Disperse volume]: Regression in IO performance seen in sequential read for large file
  • +
  • #1753857: geo-rep: performance improvement while syncing heavy renames with existing destination
  • +
  • #1753859: Typos in glusterd log messages
  • +
  • #1753880: Set the default lru-limit in fuse to a smaller number
  • +
  • #1753928: geo-rep: non-root session going fault due improper sub-command
  • +
  • #1754448: Re-alignment of Structure attributes
  • +
  • #1754477: Thin-arbiter: Raise error in CLI if replica-count is not 2
  • +
  • #1755344: glustershd.log getting flooded with "W [inode.c:1017:inode_find] (-->/usr/lib64/glusterfs/6.0/xlator/cluster/disperse.so(+0xe3f9) [0x7fd09b0543f9] -->/usr/lib64/glusterfs/6.0/xlator/cluster/disperse.so(+0xe19c) [0x7fd09b05419 TABLE NOT FOUND"
  • +
  • #1755900: heketidbstorage bricks go down during PVC creation
  • +
  • #1756211: tests/bugs/shard/bug-1272986.t fails
  • +
  • #1756900: tests are failing in RHEL8 regression
  • +
  • #1756938: afr: support split-brain CLI for replica 3
  • +
  • #1757399: Rebalance is causing glusterfs crash on client node
  • +
  • #1758579: Rebalance causing IO Error - File descriptor in bad state
  • +
  • #1758878: # gluster v info --xml is always returning 3 for all Nx3 volumes
  • +
  • #1758984: Enable direct-io options in group virt
  • +
  • #1759002: Spurious failure tests/bugs/replicate/bug-1744548-heal-timeout.t
  • +
  • #1759081: Spurious failure of /tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
  • +
  • #1760187: Implement seek fop
  • +
  • #1760189: Use replica aware seek fop
  • +
  • #1760467: rebalance start is succeeding when quorum is not met
  • +
  • #1761759: Failure in ./tests/basic/posix/shared-statfs.t
  • +
  • #1761769: On some distros bug-1272986.t takes more than 2 minutes to run
  • +
  • #1762220: [geo-rep] sync_method showing rsync instead of tarssh post in-service upgrade
  • +
  • #1762438: DHT- gluster rebalance status shows wrong data size after rebalance is completed successfully
  • +
  • #1763036: glusterfsd crashed with "'MemoryError' Cannot access memory at address"
  • +
  • #1763439: [GSS] geo-rep entering into faulty state with OSError: [Errno 13] Permission denied
  • +
  • #1764110: tests/bugs/shard/unlinks-and-renames.t fails on RHEL8
  • +
  • #1764119: gluster rebalance status doesn't show detailed information when a node is rebooted
  • +
  • #1764129: quota_fsck script KeyError: 'contri_size'
  • +
  • #1764208: cgroup control-cpu-load.sh script not working
  • +
  • #1764418: Add Mohit & Sanju as glusterd/cli maintainers
  • +
  • #1765017: gf_event doesn't work for glfsheal process
  • +
  • #1765155: replication shouldn't modify xattr-req coming from parent
  • +
  • #1765186: Problematic coding practices at logger
  • +
  • #1765421: DHT: Add comments to the code
  • +
  • #1765426: test: fix non-root geo-rep test case
  • +
  • #1765542: Add Sunny Kumar as co-maintainer of Geo-replication component
  • +
  • #1768407: glusterfsd memory leak observed after enable tls
  • +
  • #1768896: Long method in glusterfsd - set_fuse_mount_options(...)
  • +
  • #1769712: check if grapj is ready beforce process cli command
  • +
  • #1769754: dht_readdirp_cbk: Do not strip out entries with invalid stats
  • +
  • #1771365: libglusterfs/dict.c : memory leaks
  • +
  • #1771577: [RHEL 6] Geo-replication session not starting after creation
  • +
  • #1771895: geo-rep: Improve debugging in log_raise_exception
  • +
  • #1772006: NULL dict messages flooding fuse mount log
  • +
  • #1773530: ctime value is different from atime/mtime on a create of file
  • +
  • #1773856: Set volume option when one of the node is powered off, After powering the node brick processes are offline
  • +
  • #1774011: Heal Info is hung when I/O is in progress on a gluster block volume
  • +
  • #1774866: man page update needed for gluster volume top command
  • +
  • #1775612: Remove guest access by default for GlusterFS volume SMB shares added by hook scripts
  • +
  • #1776264: RFE: systemd should restart glusterd on crash
  • +
  • #1776757: DHT - Reduce methods scope
  • +
  • #1776784: glfsheal crash on unexpected volume name
  • +
  • #1776801: Bricks are not available when volume create fails
  • +
  • #1776892: [patch] .dirstamp should be in ignored
  • +
  • #1778457: Missing error logs(afr/self-heald )
  • +
  • #1779055: glusterfs process memory leak in ior test
  • +
  • #1779089: glusterfsd do not release posix lock when multiple glusterfs client do flock -xo to the same file paralleled
  • +
  • #1779742: tests/00-geo-rep/00-georep-verify-non-root-setup.t fail on freshly installed builder
  • +
  • #1779760: Improve logging in EC, client and lock xlator
  • +
  • #1780190: glfsheal should be installed and invoked as architecture-dependent binary helper
  • +
  • #1780260: v7 fails to build on Debian 9 [patch?]
  • +
  • #1781440: event_slot_alloc not able to return index after reach slot_used count to 1024
  • +
  • #1782200: glusterd restart failing to start.
  • +
  • #1782495: GlusterFS brick process crash
  • +
  • #1784375: 'gluster volume set disable.nfs' accidentally killed unexpected process, and forced a data brick offline.
  • +
  • #1785143: Multiple glusterfsd process spawn when glusterd restart during a volume start.
  • +
  • #1785208: glusterfs client process coredump
  • +
  • #1785611: glusterfsd cashes after a few seconds
  • +
  • #1785998: change the error message for heal statistics to reflect its supportability for disperse volume
  • +
  • #1786276: [geo-rep] Help for positional argument SLAVE in schedule_georep.py.in isn't clear.
  • +
  • #1786459: unable to enable brick-multiplex feature
  • +
  • #1786478: default option is disappeared in volume info after volume reset
  • +
  • #1786679: Duplicate entries in 'ls' output after a volume expansion
  • +
  • #1786722: Functionality to enable log rotation for user serviceable snapshot's logs.
  • +
  • #1787122: glusterd allowing to set server.statedump-path to file, non-existent file and non-existent paths
  • +
  • #1787274: heal not actually healing metadata of a regular file when only time stamps are changed(data heal not required)
  • +
  • #1787554: Unable to set/modify optimistic-change-log for replicate volumes
  • +
  • #1789439: Glusterfind pre command fails
  • +
  • #1789478: S57glusterfind-delete-post.py not python3 ready (does not decode bytestring)
  • +
  • #1790748: Remove extra argument
  • +
  • #1790870: Memory corruption when sending events to an IPv6 host
  • +
  • #1791682: fail to build on recent Fedora
  • +
  • #1792276: config ssh-port can accept negative and outside allowed port range value
  • +
  • #1792707: xlators/features/quota/src/quota.c:quota_log_usage
  • +
  • #1793378: dht_hash_compute() crashes when it receives a zero length name
  • +
  • #1793852: Mounts fails after reboot of 1/3 gluster nodes
  • +
  • #1793995: gluster crash when built without gNFS support
  • +
  • #1797869: bitrot: Number of signing process threads should be configurable.
  • +
  • #1797882: Segmentation fault occurs while truncate file
  • +
  • #1797934: Client should propagate ping event from brick
  • +
  • #1800583: Halo replication is not working
  • +
  • #1800956: Rebalance : Status lists failures on stopping rebalance while it is in progress
  • +
  • #1801623: spurious self-heald.t failure
  • +
  • #1801624: Heal pending on volume, even after all the bricks are up
  • +
  • #1801684: Memory corruption when glfs_init() is called after glfs_fini()
  • +
  • #1804786: mount.glusterfs strips off "/" from subdir-mounts
  • +
  • #1808421: WORM: If autocommit-period 0 file will be WORMed with 0 Byte during initial write
  • +
  • #1808875: [brickmux]: glustershd crashed when rebooting 1/3 nodes at regular intervals
  • +
  • #1810042: Changes to gluster peer probe in nightly build breaks ansible:gluster_volume call
  • +
  • #1810842: frequent heal observed when file opened during one brick is down
  • +
  • #1810934: Segfault in FUSE process, potential use after free
  • +
  • #1811631: brick crashed when creating and deleting volumes over time (with brick mux enabled only)
  • +
  • #1812144: Add a warning message during volume expansion or resize on volume with snapshots
  • +
  • #1812353: create-export-ganesha script: mention labelled nfs parameter
  • +
  • #154 Optimized CHANGELOG
  • +
  • #237 Validate other memory allocation implementations instead of l...
  • +
  • #475 Reduce the number or threads used in the brick process
  • +
  • #613 Mandatory locks enhancements in replicate subvolumes
  • +
  • #657 Structured logging format support
  • +
  • #663 Add Ganesha HA bits back to glusterfs code repo
  • +
  • #687 Thin-arbiter integration with GD1
  • +
  • #699 executable program will crash if linked libgfapi together wit...
  • +
  • #703 provide mechanism to test individual xlators
  • +
  • #721 Introduce quorum-count option in disperse volumes as well
  • +
  • #723 Provide scripts to reset xattrs of the entries which could be...
  • +
  • #725 Disperse: A way to read from specific bricks
  • +
  • #741 Client Handling of Elastic Clusters
  • +
  • #745 storage.reserve enhancement for posix_write
  • +
  • #748 Improve MAKE_HANDLE_GFID_PATH macro and posix_handle_gfid_path()
  • +
  • #753 Remove fetching items in gf_cli_replace_brick(), gf_cli_reset...
  • +
  • #755 [RFE] Geo-replication code improvements
  • +
  • #761 Improve MAKE_HANDLE_PATH macro
  • +
  • #763 thin-arbiter: Testing report
  • +
  • #765 nfs.rpc-auth-allow gluster7 + gnfs
  • +
  • #788 run-with-valgrind option causes gnfs and quota to fail to start
  • +
  • #824 Migrate bugzilla workflow to github issues workflow
  • +
  • #832 Permission Denied in logs
  • +
  • #884 [bug:1808688] Data corruption with asynchronous writes (pleas...
  • +
  • #891 [bug:1802451] Optimize posix code to improve file creation
  • +
  • #977 [bug:1811631] brick crashed when creating and deleting volume...
  • +
  • #999 [bug:1791285] Changing permissions on root directory(director...
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #1038 [bug:1787138] Crash on rpcsvc_drc_client_unref() - fails on G...
  • +
  • #1042 [bug:1806499] afr-lock-heal-basic.t and /afr-lock-heal-advanc...
  • +
  • #1044 [bug:1790730] Add a basic test file to glusterfind
  • +
  • #1052 [bug:1693692] Increase code coverage from regression tests
  • +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
  • #1067 [bug:1661889] Metadata heal picks different brick each time a...
  • +
  • #1097 [bug:1635688] Keep only the valid (maintained/supported) comp...
  • +
  • #1102 dht: gf_defrag_process_dir is called even if gf_defrag_fix_la...
  • +
  • #1104 geo-replication: descriptive message when worker crashes due ...
  • +
  • #1105 [bug:1794263] Multiple imports from the same library in the ....
  • +
  • #1127 Mount crash during background shard cleanup
  • +
  • #1134 snap_scheduler.py init failing with "TypeError: Can't mix str...
  • +
  • #1140 getfattr returns ENOATTR for system.posix_acl_access on dispe...
  • +
  • #1141 Make SSL connection messages useful
  • +
  • #1142 log the ENOENT error in posix_pstat
  • +
  • #1144 [Disperse] Add test for reset-brick for disperse volume
  • +
  • #1146 gfapi/Upcall: Potential deadlock in synctask threads processi...
  • +
  • #1149 Add error logs to debug failures in ./tests/bugs/protocol/bug...
  • +
  • #1150 Avoid dict_del logs in posix_is_layout_stale while key is NULL
  • +
  • #1152 Spurious failure of tests/bugs/protocol/bug-1433815-auth-allow.t
  • +
  • #1153 Spurious failure of ./tests/bugs/snapshot/bug-1111041.t
  • +
  • #1154 failing test cases
  • +
  • #1156 Spurious failure of tests/features/worm.t
  • +
  • #1158 spurious failure of tests/bugs/glusterd/serialize-shd-manager...
  • +
  • #1160 sys_stat should be used instead of stat
  • +
  • #1161 tests: file offsets and sizes shouldn't be truncated to 32-bi...
  • +
  • #1162 spurious failure of test case tests/bugs/glusterd/removing-mu...
  • +
  • #1169 common-ha: cluster status shows "FAILOVER" even when all node...
  • +
  • #1180 (glusterfs-8.0) - GlusterFS 8.0 tracker
  • +
  • #1179 gnfs split brain when 1 server in 3x1 down (high load)
  • +
  • #1220 cluster/ec: return correct error code and log the message in case of BADFD
  • +
  • #1223 Failure of tests/basic/gfapi/gfapi-copy-file-range.t
  • +
  • #1116 [bug:1790736] gluster volume list returning wrong volume list / volume list time out
  • +
  • #990 [bug:1578405] EIO errors when updating and deleting entries co...
  • +
  • #1126 packaging: overhaul glusterfs.spec(.in) to align with SUSE and...
  • +
  • #1225 fuse causes glusterd to dump core
  • +
  • #1243 Modify and return iatt (especially size and block-count) in sh...
  • +
  • #1254 Prioritize ENOSPC over other lesser priority errors
  • +
  • #1303 Failures in rebalance due to [Input/output error]
  • +
\ No newline at end of file
diff --git a/release-notes/8.1/index.html b/release-notes/8.1/index.html
new file mode 100644
index 00000000..aff8229f
--- /dev/null
+++ b/release-notes/8.1/index.html
@@ -0,0 +1,4541 @@

Release notes for Gluster 8.1

+

Release date: 27-Aug-2020

+

This is an improvements and bugfix release. The release notes for 8.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

+

NOTE: Next minor release tentative date: Week of 20th Sep, 2020

+

Improvements and Highlights

+

Below improvements have been added to this minor release.

+
    +
  • Performance improvement in the creation of large files (e.g. VM disks in oVirt) by reducing trivial lookups of non-existent shards. Issue (#1425)
  • +
  • Fsync in the replication module now uses the eager-lock functionality, which improves the performance of VM workloads by more than 50% for small-block (approximately 4KB) write-heavy workloads. Issue (#1253)
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/8/8.1/

+

Issues addressed in this release

+

Please find the list of issues addressed in this release below.

+
    +
  • #763 thin-arbiter: Testing report
  • +
  • #1217 Modify group "virt" to add rpc/network related changes
  • +
  • #1250 geo-rep: Fix corner case in rename on mkdir during hybrid crawl
  • +
  • #1281 Unlinking the file with open fd, returns ENOENT or stale file ...
  • +
  • #1348 Fuse mount crashes in shard translator when truncating a *real...
  • +
  • #1351 issue with gf_fill_iatt_for_dirent()
  • +
  • #1352 api: libgfapi symbol versions break LTO in Fedora rawhide/f33
  • +
  • #1354 High CPU utilization by self-heal on disperse volumes with no ...
  • +
  • #1385 High CPU utilization by self-heal on disperse volumes when an ...
  • +
  • #1396 [bug-1851989] smallfile performance drops after commit the pat...
  • +
  • #1407 glusterd keep crashing when upgrading from 6.5 to 7.7
  • +
  • #1418 GlusterFS 8.0: Intermittent error:1408F10B:SSL routines:SSL3_G...
  • +
  • #1440 glusterfs 7.7 fuse client memory leak
  • +
\ No newline at end of file
diff --git a/release-notes/8.2/index.html b/release-notes/8.2/index.html
new file mode 100644
index 00000000..9d75aa92
--- /dev/null
+++ b/release-notes/8.2/index.html
@@ -0,0 +1,4532 @@

Release notes for Gluster 8.2

+

Release date: 23-Sept-2020

+

This is an improvements and bugfix release. The release notes for 8.0 and 8.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

+

NOTE: Next minor release tentative date: Week of 20th Oct, 2020

+

Improvements and Highlights

+

Below improvements have been added to this minor release.

+
    +
  • Glustereventsd will accept IPv6 packets too. Issue (#1377)
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/8/8.2/

+

Issues addressed in this release

+

Please find the list of issues addressed in this release below.

+
    +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
  • #1332 Unable to Upgrade to Gluster 7 from Earlier Version
  • +
  • #1440 glusterfs 7.7 fuse client memory leak
  • +
  • #1472 Readdir-ahead leads to inconsistent ls results
  • +
\ No newline at end of file
diff --git a/release-notes/8.3/index.html b/release-notes/8.3/index.html
new file mode 100644
index 00000000..a69729e2
--- /dev/null
+++ b/release-notes/8.3/index.html
@@ -0,0 +1,4541 @@

Release notes for Gluster 8.3

+

Release date: 23-Dec-2020

+

This is a bugfix release. The release notes for 8.0, 8.1 and 8.2 +contain a listing of all the new features that were added +and bugs fixed in the GlusterFS 8 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 20th Feb, 2021
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Highlights of Release

+

This release mainly contains bug fixes, as described in the issues section below.

+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/8/8.3/

+

Issues addressed in this release

+

Please find the list of issues addressed in this release below.

+
    +
  • #1836 posix: Update ret value in posix_get_gfid2path if GF_MALLOC fails
  • +
  • #1796 afr: call afr_is_lock_mode_mandatory only while xdata is valid
  • +
  • #1778 volume set: failed: ganesha.enable is already 'off'.
  • +
  • #1738 [cli] Improper error message on command timeout
  • +
  • #1699 One brick offline with signal received: 11
  • +
  • #1663 test case ./tests/bugs/core/bug-1650403.t is getting timed out
  • +
  • #1601 rfc.sh on release-8 needs to move to github flow
  • +
  • #1499 why not use JumpConsistentHash to replace SuperFastHash to cho...
  • +
  • #1438 syncdaemon/syncdutils.py: SyntaxWarning: "is" with a literal. ...
  • +
  • #1221 features/bit-rot: invalid snprintf() buffer size
  • +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
  • #1002 [bug:1679998] GlusterFS can be improved
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
\ No newline at end of file
diff --git a/release-notes/8.4/index.html b/release-notes/8.4/index.html
new file mode 100644
index 00000000..d145842c
--- /dev/null
+++ b/release-notes/8.4/index.html
@@ -0,0 +1,4537 @@

Release notes for Gluster 8.4

+

Release date: 01-Mar-2021

+

This is a bugfix release. The release notes for 8.0, 8.1, 8.2 and 8.3 +contain a listing of all the new features that were added +and bugs fixed in the GlusterFS 8 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 20th Apr, 2021
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Highlights of Release

+
    +
  • Healing data in 1MB chunks instead of 128KB for improving healing performance #2067 (see the example after this list)
  • +
+
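As a rough sketch of where this behaviour can be inspected or tuned (the option name cluster.data-self-heal-window-size and the value 8 are assumptions for illustration, not taken from these release notes; VOLNAME is a placeholder):

    gluster volume get VOLNAME cluster.data-self-heal-window-size
    gluster volume set VOLNAME cluster.data-self-heal-window-size 8

On this release the larger heal chunk is the default, so the commands above are only needed if you want to deviate from it.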

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/8/8.4/

+

Issues addressed in this release

+

Please find the list of issues addressed in this release below.

+
    +
  • #2154 "Operation not supported" doing a chmod on a symlink
  • +
  • #2107 mount crashes when setfattr -n distribute.fix.layout -v "yes"...
  • +
  • #1991 mdcache: bug causes getxattr() to report ENODATA when fetchin...
  • +
  • #1925 dht_pt_getxattr does not seem to handle virtual xattrs.
  • +
  • #1539 fuse mount crashes on graph-switch when reader-thread-count i...
  • +
  • #1529 Fix regression in on demand migration feature
  • +
  • #1406 shared storage volume fails to mount in ipv6 environment
  • +
\ No newline at end of file
diff --git a/release-notes/8.5/index.html b/release-notes/8.5/index.html
new file mode 100644
index 00000000..db849d14
--- /dev/null
+++ b/release-notes/8.5/index.html
@@ -0,0 +1,4538 @@

Release notes for Gluster 8.5

+

Release date: 17-May-2021

+

This is a bugfix release. The release notes for 8.0, 8.1, 8.2, 8.3 and 8.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 30th Jun, 2021
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Important fixes in this release

+
    +
  • Slow write on ZFS bricks after healing millions of files due to adding arbiter brick #1764
  • +
  • 4+1 arbiter setup is broken #2192
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/8/8.5/

+

Issues addressed in this release

+
    +
  • #1214 Running tests/basic/afr/inodelk.t on my VM crashes in dht
  • +
  • #1324 Inconsistent custom xattr on backend directories after bringing bac
  • +
  • #1764 Slow write on ZFS bricks after healing millions of files due to add
  • +
  • #2161 Crash caused by memory corruption
  • +
  • #2192 4+1 arbiter setup is broken
  • +
  • #2198 There are blocked inodelks for a long time
  • +
  • #2210 glusterfsd memory leak observed when constantly running volume heal
  • +
  • #2234 Segmentation fault in directory quota daemon for replicated volume
  • +
  • #2253 Disable lookup-optimize by default in the virt group
  • +
  • #2313 Long setting names mess up the columns and break parsing
  • +
\ No newline at end of file
diff --git a/release-notes/8.6/index.html b/release-notes/8.6/index.html
new file mode 100644
index 00000000..43cd797e
--- /dev/null
+++ b/release-notes/8.6/index.html
@@ -0,0 +1,4536 @@

Release notes for Gluster 8.6

+

Release date: 30-Aug-2021

+

This is a bugfix release. The release notes for 8.0, 8.1, 8.2, 8.3, 8.4 and 8.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

+

NOTE:

+
    +
  • This is the last release of Gluster 8 series
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Important fixes in this release

+
    +
  • Improvement in geo-rep handling of gfid mismatches: Geo-replication gets delayed when there are many renames on the primary volume (https://github.com/gluster/glusterfs/issues/2388)
  • +
  • Resolve core dumps on Gluster 9 - 3 replicas: Initialize list head to prevent NULL de-reference (https://github.com/gluster/glusterfs/issues/2443)
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/8/8.6/

+

Issues addressed in this release

+
    +
  • #2388 Geo-replication gets delayed when there are many renames on primary volume
  • +
  • #2689 glusterd: reset mgmt_v3_lock_timeout after it be used
  • +
  • #1000 GlusterFS can be improved: fix getcwd usage warning
  • +
  • #2394 Spurious failure in tests/basic/fencing/afr-lock-heal-basic.t
  • +
  • #2691 georep-upgrade.t find failures
  • +
  • #154 Optimized CHANGELOG: upgrade script for geo-rep
  • +
  • #2443 Core dumps on Gluster 9 - 3 replicas: Initialize list head to prevent NULL de-reference
  • +
  • #2404 Spurious failure of tests/bugs/ec/bug-1236065.t
  • +
\ No newline at end of file
diff --git a/release-notes/9.0/index.html b/release-notes/9.0/index.html
new file mode 100644
index 00000000..a17a3f2c
--- /dev/null
+++ b/release-notes/9.0/index.html
@@ -0,0 +1,4824 @@

Release notes for Gluster 9.0

+

Release date: 05-Feb-2021

+

This is a major release that includes a range of features, code improvements and stability fixes as noted below.

+

A selection of the key features and changes are documented in this page. +A full list of bugs that have been addressed is included further below.

+ +

Announcements

+
    +
  1. +

    The release that continues to receive maintenance updates after release 9 is release 8 (reference)

    +
  2. +
  3. +

    Release 9 will receive maintenance updates around the 30th of every month for the first 3 months post release (i.e. Mar'21, Apr'21, May'21). After the initial 3 months, it will receive maintenance updates every 2 months till EOL.

    +
  4. +
+

Major changes and features

+

Highlights

+

Added support for:

+
    +
  • io_uring in Gluster (io_uring support in the kernel is required, along with the presence of the liburing library and headers); see the example after this list
  • +
  • support running with up to 5000 volumes (Testing done on: 5k volumes on 3 nodes, brick_mux was enabled with default configuration)
  • +
+
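As an illustrative sketch only (the volume option storage.linux-io_uring is the one referenced in the 9.1 notes further below; VOLNAME is a placeholder), io_uring can be toggled per volume once the kernel and liburing prerequisites are met:

    gluster volume set VOLNAME storage.linux-io_uring on
    gluster volume get VOLNAME storage.linux-io_uring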

Features

+
    +
  • Added io_uring support for Gluster #1398
  • +
  • Added Support for 5K volumes #1613
  • +
  • Enabled granular-entry-heal by default #1483
  • +
  • Optimizations for rename dir heal #1211
  • +
  • Added support for monitoring the epoll/rpc layer #1466
  • +
  • Brick mux: Added support to spawn one thread per process instead of one thread per brick #1482
  • +
  • Improve rebalance of sparse files #1222
  • +
  • LTO/GCC10 - Gluster is now compiled with LTO enabled by default #1772
  • +
+

Major issues

+

None

+

Bugs addressed

+

Bugs addressed since release-8 are listed below.

+
    +
  • #718 _store_global_opts(), _storeslaves() , _storeopts() should no...
  • +
  • #280 Use internal error codes instead of UNIX errnos
  • +
  • #1855 Makefile: failed to compile without git repository
  • +
  • #1849 geo-rep: The newly setup geo-rep session goes faulty with syn...
  • +
  • #1836 posix: Update ret value in posix_get_gfid2path if GF_MALLOC f...
  • +
  • #1810 Implement option to generate core dump at will without killin...
  • +
  • #1796 afr: call afr_is_lock_mode_mandatory only while xdata is valid
  • +
  • #1794 posix: A brick process is getting crashed at the time of grap...
  • +
  • #1782 Rebalance is reporting status twice upon stopping, resulting ...
  • +
  • #1778 volume set: failed: ganesha.enable is already 'off'.
  • +
  • #1775 core: lru_size showing -1 with zero inodes in the list in the...
  • +
  • #1772 build: add LTO as a configure option
  • +
  • #1743 Modify format to contain more information while raising glust...
  • +
  • #1739 test case ./tests/basic/afr/entry-self-heal-anon-dir-off.t is...
  • +
  • #1738 [cli] Improper error message on command timeout
  • +
  • #1733 api: conscious language changes
  • +
  • #1713 Conscious language changes in various xlators
  • +
  • #1699 One brick offline with signal received: 11
  • +
  • #1692 Test tests/basic/0symbol-check.t should exclude more contrib/...
  • +
  • #1663 test case ./tests/bugs/core/bug-1650403.t is getting timed out
  • +
  • #1661 test case ./tests/bugs/bug-1064147.t is continuously failing
  • +
  • #1659 wrong comparison in glusterd_brick_start() function
  • +
  • #1654 Rebalance/migration per directory/file
  • +
  • #1653 io-cache xlators lock/unlock are always accompanied by gf_msg...
  • +
  • #1627 Stopping rebalance results in a failure
  • +
  • #1613 glusterd[brick_mux]: Optimize friend handshake code to avoid ...
  • +
  • #1594 ./tests/00-geo-rep/00-georep-verify-non-root-setup.t fails on...
  • +
  • #1587 geo-rep: Enable rsync verbose logging to help debug rsync errors
  • +
  • #1584 MAINTAINERS file needs to be revisited and updated
  • +
  • #1582 ./rfc.sh doesn't pick upstream correctly
  • +
  • #1577 cli-rpc: Call to global quota rpc init even though operation ...
  • +
  • #1569 Introduce a compile time --enable-brickmux option to run bric...
  • +
  • #1565 Implement pass-through option for write-behind
  • +
  • #1550 MAINTAINERS list of DHT needs to be updated
  • +
  • #154 Optimized CHANGELOG
  • +
  • #1546 Wrong permissions syned to remote brick when using halo repli...
  • +
  • #1545 fuse_invalidate_entry() - too many repetitive calls to uuid_u...
  • +
  • #1544 file tree memory layout optimization
  • +
  • #1543 trash: Create inode_table only while feature is enabled
  • +
  • #1542 io-stats: Configure ios_sample_buf_size based on sample_inter...
  • +
  • #1541 Geo-rep: some files(hardlinks) are missing in slave after mas...
  • +
  • #1540 [RFE] Rebalance: suppurt migration to files with hardlinks (n...
  • +
  • #1539 fuse mount crashes on graph-switch when reader-thread-count i...
  • +
  • #1538 Need to configure optimum inode table hash_size for shd
  • +
  • #1529 Fix regression in on demand migration feature
  • +
  • #1526 Brick status is 'stopped' if socket file is absent but brick ...
  • +
  • #1518 glusterfs: write operations fail when the size is equal or gr...
  • +
  • #1516 Use of strchr glusterd_replace_slash_with_hyphen
  • +
  • #1511 Crash due to memory allocation
  • +
  • #1508 Add-brick with Increasing replica count fails with bad brick ...
  • +
  • #1507 Time-to-completion mechansim in rebalance is broken
  • +
  • #1506 tests/000-flaky/bugs_nfs_bug-1116503.t is crashed in in gf_me...
  • +
  • #1499 why not use JumpConsistentHash to replace SuperFastHash to ch...
  • +
  • #1497 Removing strlen and using the already existing len of data_t
  • +
  • #1487 Quota accounting check script fails with UnicodeDecodeError
  • +
  • #1483 Enable granular-entry-heal by default
  • +
  • #1482 [Brick-mux] Attach several posix threads with glusterfs_ctx
  • +
  • #1480 First letter in mount path of bricks are getting truncated fo...
  • +
  • #1477 nfs server crashes in acl3svc_init
  • +
  • #1476 Changes required at Snaphot as gluster-shared-storage mount p...
  • +
  • #1475 gluster_shared_storage failed to automount on node reboot on ...
  • +
  • #1472 Readdir-ahead leads to inconsistent ls results
  • +
  • #1466 RPC handling latencies should be printed in statedump
  • +
  • #1464 Avoid dict OR key (link-count) is NULL [Invalid argument] mes...
  • +
  • #1459 gluster_shared_storage failed to automount on node reboot on ...
  • +
  • #1453 Disperse shd heal activity should be observable
  • +
  • #1442 Remove Glusterfs SELinux module from Distribution's selinux-p...
  • +
  • #1440 glusterfs 7.7 fuse client memory leak
  • +
  • #1438 syncdaemon/syncdutils.py: SyntaxWarning: "is" with a literal....
  • +
  • #1434 Inform failures while fop failed in disperse volume due to so...
  • +
  • #1428 Redundant check in dict_get_with_refn()
  • +
  • #1427 Bricks failed to restart after a power failure
  • +
  • #1425 optimization over shard lookup in case of prealloc
  • +
  • #1422 Rebalance - new volume option to turn on/off optimization in ...
  • +
  • #1418 GlusterFS 8.0: Intermittent error:1408F10B:SSL routines:SSL3_...
  • +
  • #1416 Dependencies of performance.parallel-readdir should be automa...
  • +
  • #1410 01-georep-glusterd-tests.t times out on centos7 builders
  • +
  • #1407 glusterd keep crashing when upgrading from 6.5 to 7.7
  • +
  • #1406 shared storage volume fails to mount in ipv6 environment
  • +
  • #1404 Client side split-brain resolution using favourite-child-poli...
  • +
  • #1403 Tests failure on C8: ./tests/features/ssl-ciphers.t
  • +
  • #1401 quota_fsck.py throws TypeError
  • +
  • #1400 Annotate synctasks with tsan API if --enable-tsan is requested
  • +
  • #1399 Add xlator identifiers in statedumps for mem-pools
  • +
  • #1398 io_uring support in glusterfs main branch
  • +
  • #1397 glusterd_check_brick_order() is needlessly fetching volname, ...
  • +
  • #1396 [bug-1851989] smallfile performance drops after commit the pa...
  • +
  • #1395 optimize dict_serialized_length_lk function
  • +
  • #1391 allow add-brick from nodes which are not part of auth.allow list
  • +
  • #1385 High CPU utilization by self-heal on disperse volumes when an...
  • +
  • #1383 Remove contrib/sunrpc/xdr_sizeof.c
  • +
  • #1381 Optional FUSE notitications
  • +
  • #1379 Fix NULL pointer
  • +
  • #1378 Use better terminology and wording in the code
  • +
  • #1377 Glustereventsd to accept not only IPv4 but IPv6 packets too.
  • +
  • #1376 Runtime & Build Fixes for FreeBSD
  • +
  • #1375 cluster: mount.glusterfs is stuck when trying to mount unknow...
  • +
  • #1374 fuse interrupt issues identified in code review
  • +
  • #1371 [RHEL 8.1] [Input/output error] observed in remove-brick oper...
  • +
  • #1366 geo-replication session fails to start with IPV6
  • +
  • #1361 Screen .attribute directories on NetBSD
  • +
  • #1359 Cleanup --disable-mempool
  • +
  • #1357 options should display not only current values but also defau...
  • +
  • #1356 cli: type mismatch global_quotad_rpc in cli-quotad-client.c
  • +
  • #1355 Heal info desn't show split-brain info if halo is enabled
  • +
  • #1354 High CPU utilization by self-heal on disperse volumes with no...
  • +
  • #1353 errors seen with gluster v get all all
  • +
  • #1352 api: libgfapi symbol versions break LTO in Fedora rawhide/f33
  • +
  • #1351 issue with gf_fill_iatt_for_dirent()
  • +
  • #1350 Simplify directory scanning
  • +
  • #1348 Fuse mount crashes in shard translator when truncating a *rea...
  • +
  • #1347 NetBSD build fixes
  • +
  • #1339 Rebalance status is not shown correctly after node reboot
  • +
  • #1332 Unable to Upgrade to Gluster 7 from Earlier Version
  • +
  • #1329 Move platform-dependent filesystem sync to a library function
  • +
  • #1328 Linux kernel untar failed with errors immediate after add-brick
  • +
  • #1327 Missing directory is not healed in dht
  • +
  • #1324 Inconsistent custom xattr on backend directories after bringi...
  • +
  • #1320 Unified support for building with sanitizers
  • +
  • #1311 Data race when handling connection status
  • +
  • #1310 tests/features/flock_interrupt.t leads to error logs
  • +
  • #1306 add-brick command is failing
  • +
  • #1303 Failures in rebalance due to [Input/output error]
  • +
  • #1302 always print errno (and use English locale for strerror() out...
  • +
  • #1291 Free volume info lock and mutex
  • +
  • #1290 Test case brick-mux-validation-in-cluster.t is failing on RHEL-8
  • +
  • #1289 glustereventsd log file isn't reopened after rotation
  • +
  • #1285 Use-after-destroy mutex error
  • +
  • #1283 Undefined behavior in __builtin_ctz
  • +
  • #1282 New file created with xattr "trusted.glusterfs.dht"
  • +
  • #1281 Unlinking the file with open fd, returns ENOENT or stale file...
  • +
  • #1279 Fix several signed integer overflows
  • +
  • #1278 Fix memory leak in afr_priv_destroy()
  • +
  • #1275 Make glusterfs compile on all recent and supported versions o...
  • +
  • #1272 tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glust...
  • +
  • #1269 common-ha: ganesha-ha.sh bad test for {rhel,centos} for pcs o...
  • +
  • #1263 Fix memory leak in glusterd_store_retrieve_bricks()
  • +
  • #1260 Implement minimal proper synchronization for gf_attach
  • +
  • #1259 Fix memory leak in gf_cli_gsync_status_output()
  • +
  • #1258 dht: Add null check
  • +
  • #1255 Improve snapshot clone error message
  • +
  • #1254 Prioritize ENOSPC over other lesser priority errors
  • +
  • #1253 On Ovirt setup glusterfs performs poorly
  • +
  • #1250 geo-rep: Fix corner case in rename on mkdir during hybrid crawl
  • +
  • #1249 Drop 'const' type qualifier on return type
  • +
  • #1248 Fix thread naming and related convention
  • +
  • #1245 Spurious failures in ./tests/basic/ec/ec-quorum-count.t
  • +
  • #1243 Modify and return iatt (especially size and block-count) in s...
  • +
  • #1242 Rebalance - Improve Crawl time in rebalance
  • +
  • #1240 tests/basic/afr/gfid-mismatch-resolution-with-fav-child-polic...
  • +
  • #1236 glusterfs-geo-replication requires policycoreutils-python-uti...
  • +
  • #1234 Fix ./tests/basic/fencing/afr-lock-heal-basic.t failure
  • +
  • #1230 core dumped executing tests/line-coverage/errorgen-coverage.t
  • +
  • #1228 seek functionalty is broken
  • +
  • #1226 Gluster webhook update throws error
  • +
  • #1225 fuse causes glusterd to dump core
  • +
  • #1223 Failure of tests/basic/gfapi/gfapi-copy-file-range.t
  • +
  • #1222 [RFE] Improve rebalance of sparse files
  • +
  • #1221 features/bit-rot: invalid snprintf() buffer size
  • +
  • #1220 cluster/ec: return correct error code and log the message in ...
  • +
  • #1218 dht: Do opendir selectively in gf_defrag_process_dir
  • +
  • #1217 Modify group "virt" to add rpc/network related changes
  • +
  • #1214 Running tests/basic/afr/inodelk.t on my VM crashes in dht
  • +
  • #1211 AFR: Rename dir heal shouldn't delete the directory at oldloc...
  • +
  • #1209 tests: georep-upgrade.t test failure
  • +
  • #1208 warning: initializer overrides prior initialization of this s...
  • +
  • #1207 warning: passing an object that undergoes default argument pr...
  • +
  • #1204 GD_OP_VERSION needs to be updated
  • +
  • #1202 Issues reported by Coverity static analysis tool
  • +
  • #1200 Handle setxattr and rm race for directory in rebalance
  • +
  • #1197 Geo-replication tests are spuriously failing in CI
  • +
  • #1196 glusterfsd is having a leak while only mgmt SSL is enabled
  • +
  • #1193 Scheduling of snapshot for a volume is failing to create snap...
  • +
  • #1190 spurious failure of tests/basic/quick-read-with-upcall.t
  • +
  • #1187 Failures in rebalance due to [No space left on device] error ...
  • +
  • #1182 geo-rep requires relevant selinux permission for rsync
  • +
  • #1179 gnfs split brain when 1 server in 3x1 down (high load)
  • +
  • #1172 core, cli, quota: cleanup malloc debugging and stats
  • +
  • #1169 common-ha: cluster status shows "FAILOVER" even when all node...
  • +
  • #1164 migrate remove-brick operation to mgmt v3 frameowrk
  • +
  • #1154 failing test cases
  • +
  • #1135 Fix @sysconfdir@ expansion in extras/systemd/glusterd.service...
  • +
  • #1126 packaging: overhaul glusterfs.spec(.in) to align with SUSE an...
  • +
  • #1116 [bug:1790736] gluster volume list returning wrong volume list...
  • +
  • #1101 [bug:1813029] volume brick fails to come online because other...
  • +
  • #1097 [bug:1635688] Keep only the valid (maintained/supported) comp...
  • +
  • #1096 [bug:1622665] clang-scan report: glusterfs issues
  • +
  • #1075 [bug:1299203] resolve-gids is not needed for Linux kernels v3...
  • +
  • #1072 [bug:1251614] gf_defrag_fix_layout recursively fails, distrac...
  • +
  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
  • +
  • #1052 [bug:1693692] Increase code coverage from regression tests
  • +
  • #1050 [bug:1787325] TLS/SSL access of GlusterFS mounts is slower th...
  • +
  • #1047 [bug:1774379] check for same hostnames(bricks from same host/...
  • +
  • #1043 [bug:1793490] snapshot clone volume is not exported via NFS-G...
  • +
  • #1009 [bug:1756900] tests are failing in RHEL8 regression
  • +
  • #1002 [bug:1679998] GlusterFS can be improved
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #990 [bug:1578405] EIO errors when updating and deleting entries c...
  • +
  • #952 [bug:1589705] quick-read: separate performance.cache-size tun...
  • +
  • #876 [bug:1797099] After upgrade from gluster 7.0 to 7.2 posix-acl...
  • +
  • #874 [bug:1793390] Pre-validation failure does not provide any hin...
  • +
  • #837 Indicate timezone offset in formatted timestamps
  • +
  • #829 gfapi: Using ssl and glfs_set_volfile together does not work
  • +
  • #827 undefined symbol: xlator_api
  • +
  • #824 Migrate bugzilla workflow to github issues workflow
  • +
  • #816 RFE: Data/MetaData separator Translator
  • +
  • #790 infinite loop in common-utils.c - gf_rev_dns_lookup_cache() ?
  • +
  • #763 thin-arbiter: Testing report
  • +
\ No newline at end of file
diff --git a/release-notes/9.1/index.html b/release-notes/9.1/index.html
new file mode 100644
index 00000000..c3012d6c
--- /dev/null
+++ b/release-notes/9.1/index.html
@@ -0,0 +1,4540 @@

Release notes for Gluster 9.1

+

Release date: 05-Apr-2021

+

This is a bugfix and improvement release. The release notes for 9.0 +contain a listing of all the new features that were added +and bugs fixed in the GlusterFS 9 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 30th Apr, 2021
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Highlights of Release

+
    +
  • Provide autoconf option to enable/disable storage.linux-io_uring during compilation #2063
  • +
  • Healing data in 1MB chunks instead of 128KB for improving healing performance #2067
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/9/9.1/

+

Issues addressed in this release

+

Please find the list of issues addressed in this release below.

+
    +
  • #1406 shared storage volume fails to mount in ipv6 environment
  • +
  • #1991 mdcache: bug causes getxattr() to report ENODATA when fetchin...
  • +
  • #2063 Provide autoconf option to enable/disable storage.linux-io_ur...
  • +
  • #2067 Change self-heal-window-size to 1MB by default
  • +
  • #2107 mount crashes when setfattr -n distribute.fix.layout -v "yes"...
  • +
  • #2154 "Operation not supported" doing a chmod on a symlink
  • +
  • #2192 4+1 arbiter setup is broken
  • +
  • #2198 There are blocked inodelks for a long time
  • +
  • #2234 Segmentation fault in directory quota daemon for replicated v...
  • +
\ No newline at end of file
diff --git a/release-notes/9.2/index.html b/release-notes/9.2/index.html
new file mode 100644
index 00000000..df3da7ab
--- /dev/null
+++ b/release-notes/9.2/index.html
@@ -0,0 +1,4536 @@

Release notes for Gluster 9.2

+

Release date: 17-May-2021

+

This is a bugfix and improvement release. The release notes for 9.0, 9.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 30th Jun, 2021
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Important fixes in this release

+
    +
  • After upgrade on release 9.1 glusterd protocol is broken #2351
  • +
  • Disable lookup-optimize by default in the virt group #2253
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/9/9.2/

+

Issues addressed in this release

+
    +
  • #1909 core: Avoid several dict OR key is NULL message in brick logs
  • +
  • #2161 Crash caused by memory corruption
  • +
  • #2232 "Invalid argument" when reading a directory with gfapi
  • +
  • #2253 Disable lookup-optimize by default in the virt group
  • +
  • #2313 Long setting names mess up the columns and break parsing
  • +
  • #2337 memory leak observed in lock fop
  • +
  • #2351 After upgrade on release 9.1 glusterd protocol is broken
  • +
  • #2353 Permission issue after upgrading to Gluster v9.1
  • +
\ No newline at end of file
diff --git a/release-notes/9.3/index.html b/release-notes/9.3/index.html
new file mode 100644
index 00000000..5ceb719d
--- /dev/null
+++ b/release-notes/9.3/index.html
@@ -0,0 +1,4540 @@

Release notes for Gluster 9.3

+

Release date: 15-Jul-2021

+

This is a bugfix and improvement release. The release notes for 9.0, 9.1, 9.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 30th Aug, 2021
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Important fixes in this release

+
    +
  • Core dumps on Gluster 9 - 3 replicas #2443
  • +
  • geo-rep: Improve handling of gfid mismatches #2423
  • +
  • auth.allow list is corrupted after add-brick (buffer overflow?) #2524
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/9/9.3/

+

Issues addressed in this release

+
    +
  • #705 gf_backtrace_save inefficiencies
  • +
  • #1000 [bug:1193929] GlusterFS can be improved
  • +
  • #1384 mount glusterfs volume, files larger than 64Mb only show 64Mb
  • +
  • #2388 Geo-replication gets delayed when there are many renames on primary
  • +
  • #2394 Spurious failure in tests/basic/fencing/afr-lock-heal-basic.t
  • +
  • #2398 Bitrot and scrub process showed like unknown in the gluster volume
  • +
  • #2421 rsync should not try to sync internal xattrs.
  • +
  • #2440 Geo-replication not working on Ubuntu 21.04
  • +
  • #2443 Core dumps on Gluster 9 - 3 replicas
  • +
  • #2470 sharding: [inode.c:1255:__inode_unlink] 0-inode: dentry not found
  • +
  • #2524 auth.allow list is corrupted after add-brick (buffer overflow?)
  • +
\ No newline at end of file
diff --git a/release-notes/9.4/index.html b/release-notes/9.4/index.html
new file mode 100644
index 00000000..2aea039f
--- /dev/null
+++ b/release-notes/9.4/index.html
@@ -0,0 +1,4540 @@

Release notes for Gluster 9.4

+

Release date: 14-Oct-2021

+

This is a bugfix and improvement release. The release notes for 9.0, 9.1, 9.2, 9.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 30th Dec, 2021
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Important fixes in this release

+
    +
  • Fix changelog History Crawl resume failures after stop #2133
  • +
  • Fix Stack overflow when parallel-readdir is enabled #2169
  • +
  • Fix rebalance crashes in dht #2239
  • +
+

Builds are available at -

+

https://download.gluster.org/pub/gluster/glusterfs/9/9.4/

+

Issues addressed in this release

+
    +
  • #2133 changelog History Crawl resume fails after stop
  • +
  • #2169 Stack overflow when parallel-readdir is enabled
  • +
  • #2239 rebalance crashes in dht on master
  • +
  • #2625 auth.allow value is corrupted after add-brick operation
  • +
  • #2649 glustershd failed in bind with error "Address already in use"
  • +
  • #2659 tests/basic/afr/afr-anon-inode.t crashed
  • +
  • #2754 It takes a long time to execute the “gluster volume set volumename
  • +
  • #2798 FUSE mount option for localtime-logging is not exposed
  • +
  • #2690 glusterd: reset mgmt_v3_lock_timeout after it be used
  • +
  • #2691 georep-upgrade.t find failures
  • +
  • #1101 volume brick fails to come online because other process is using port 49152
  • +
\ No newline at end of file
diff --git a/release-notes/9.5/index.html b/release-notes/9.5/index.html
new file mode 100644
index 00000000..44ff019d
--- /dev/null
+++ b/release-notes/9.5/index.html
@@ -0,0 +1,4536 @@

Release notes for Gluster 9.5

+

Release date: 1st-Feb-2022

+

This is a bugfix and improvement release. The release notes for 9.0, 9.1, 9.2, 9.3, 9.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

+

NOTE:

+
    +
  • Next minor release tentative date: Week of 20th Aug, 2022 (as published in the Gluster Community Meeting, release 9 will now receive updates every 6 months)
  • +
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.
  • +
+

Important fixes in this release

+
    +
  • Fix rebalance of sparse files (https://github.com/gluster/glusterfs/issues/2317)
  • +
  • Fix anomalous brick offline scenario on non rebooted node by preventing bricks from connecting to a backup volfile (https://github.com/gluster/glusterfs/issues/2480)
  • +
+

Builds are available at -

+

https://download.gluster.org/pub/gluster/glusterfs/9/9.5/

+

Issues addressed in this release

+
    +
  • #2317 Fix rebalance of sparse files
  • +
  • #2414 Prefer mallinfo2() to mallinfo() if available
  • +
  • #2467 Handle failure in fuse to get gids gracefully
  • +
  • #2480 Prevent bricks from connecting to a backup volfile and fix brick offline scenario on non rebooted node
  • +
  • #2846 Avoid redundant logs in glusterd at info level
  • +
  • #2903 Fix worker disconnect due to AttributeError in geo-replication
  • +
  • #2939 Remove the deprecated commands from gluster man page
  • +
\ No newline at end of file
diff --git a/release-notes/9.6/index.html b/release-notes/9.6/index.html
new file mode 100644
index 00000000..b1af0572
--- /dev/null
+++ b/release-notes/9.6/index.html
@@ -0,0 +1,4535 @@

Release notes for Gluster 9.6

+

This is a bugfix and improvement release. The release notes for 9.0, 9.1, 9.2, 9.3, 9.4, 9.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

+

NOTE:

  • Next minor release tentative date: Week of 20th Feb, 2023
  • Users are highly encouraged to upgrade to newer releases of GlusterFS.

+

Important fixes in this release

+
    +
  • Optimize server functionality by enhancing server_process_event_upcall code path during the handling of upcall event
  • +
  • Fix all bricks not starting issue on node reboot when brick count is high(>750)
  • +
+

Builds are available at

+

https://download.gluster.org/pub/gluster/glusterfs/9/9.6/

+

Issues addressed in this release

+
    +
  • #2080 Fix inability of glustereventsd from binding to the UDP port because of selinux policies
  • +
  • #2962 Fix volume create without disperse count failures with ip addresses
  • +
  • #3177 Locks: Optimize the interrupt flow of POSIX locks
  • +
  • #3187 Fix Locks xlator leaks fd's when a blocked posix lock is cancelled
  • +
  • #3191 Fix double free issue in the cbk function dht_common_mark_mdsxattr_cbk
  • +
  • #3321 Optimize server functionality by enhancing server_process_event_upcall code path during the handling of upcall event
  • +
  • #3332 Fix garbage value reported by static analyser
  • +
  • #3334 Fix errors and timeouts when creating qcow2 file via libgfapi
  • +
  • #3375 Fix all bricks not starting issue on node reboot when brick count is high(>750)
  • +
  • #3470 Fix spurious crash when "peer probing" a non existing host name
  • +
\ No newline at end of file
diff --git a/release-notes/geo-rep-in-3.7/index.html b/release-notes/geo-rep-in-3.7/index.html
new file mode 100644
index 00000000..fc5e032c
--- /dev/null
+++ b/release-notes/geo-rep-in-3.7/index.html
@@ -0,0 +1,4697 @@

Geo rep in 3.7

+ +

Improved Node fail-over issues handling by using Gluster Meta Volume

+

In replica pairs, one Geo-rep worker should be active and all the other replica workers should be passive. When the active worker goes down, a passive worker becomes active. In previous releases this logic was based on node-uuid; now it is based on a lock file in the Meta Volume. Active/passive roles can now be decided more accurately, and multiple-active-worker scenarios are minimized.

+

Geo-rep also works without the Meta Volume, so this feature is backward compatible. By default the config option use_meta_volume is False. The feature can be turned on with the geo-rep config use_meta_volume true. Without this feature, Geo-rep works as it did in previous releases.
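A minimal sketch of enabling this (MASTERVOL, SLAVEHOST and SLAVEVOL are placeholders; the Meta Volume is assumed to be the standard gluster_shared_storage volume):

    gluster volume set all cluster.enable-shared-storage enable
    gluster volume geo-replication MASTERVOL SLAVEHOST::SLAVEVOL config use_meta_volume true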

+

Issues if meta_volume is turned off:

+
    +
  1. +

    Multiple workers become active and participate in syncing. This duplicates effort, and all the issues related to concurrent execution exist.

    +
  2. +
  3. +

    Failover only works at the node level; if a brick process goes down while the node is still alive, the failover will not happen and syncing is delayed.

    +
  4. +
  5. +

    The documented steps about placement of bricks in the case of replica 3 are very difficult to follow. For example, the first brick in each replica should not be placed on the same node, etc.

    +
  6. +
  7. +

    Consuming Changelogs from previously failed node when it comes + back, which may lead to issues like delayed syncing and data + inconsistencies in case of Renames.

    +
  8. +
+

Fixes: 1196632, +1217939

+

Improved Historical Changelogs consumption

+

Support for consuming Historical Changelogs was introduced in previous releases; with this release it is more stable and improved. Use of the filesystem crawl is minimized and limited to the initial sync. In previous releases, a node reboot or a brick process going down was treated as Changelog breakage and Geo-rep fell back to XSync for that duration. With this release, the Changelog session is considered broken only if Changelog is turned off; all other scenarios are considered safe.

+

This feature is also required by glusterfind.

+

Fixes: 1217944

+

Improved Status and Checkpoint

+

Status got many improvements, showing accurate details of session info, user info, the slave node to which each master node is connected, last synced time, etc. Initializing time is reduced, and the status change happens as soon as the geo-rep workers are ready. (In previous releases the initializing time was 60 seconds.)
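For example (session names are placeholders), the improved details can be seen with:

    gluster volume geo-replication MASTERVOL SLAVEHOST::SLAVEVOL status detail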

+

Fixes: 1212410

+

Worker Restart improvements

+

Workers going down and coming back is very common in geo-rep, for reasons like network failure, the slave node going down, etc. When a worker comes back up, it has to reprocess the changelogs again because it died before updating the last sync time. The batch size is now optimized such that the amount of reprocessing is minimized.

+

Fixes: 1210965

+

Improved RENAME handling

+

When a renamed file name's hash falls on another brick, that brick's changelog records the RENAME, but the rest of the fops like CREATE and DATA are recorded on the first brick. Each per-brick Geo-rep worker syncs data to the Slave Volume independently, so these operations go out of order and the Master and Slave Volumes become inconsistent. With the help of the DHT team, RENAMEs are now recorded where CREATE and DATA are recorded.

+

Fixes: 1141379

+

Syncing xattrs and acls

+

Syncing both xattrs and acls to the Slave cluster is now supported. These can be disabled by setting the config options sync-xattrs or sync-acls to false.
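For example (session names are placeholders), either behaviour can be switched off per session:

    gluster volume geo-replication MASTERVOL SLAVEHOST::SLAVEVOL config sync-xattrs false
    gluster volume geo-replication MASTERVOL SLAVEHOST::SLAVEVOL config sync-acls false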

+

Fixes: 1187021, +1196690

+

Identifying Entry failures

+

Logging improvements to identify the exact reason for entry failures, GFID conflicts, I/O errors, etc. Safe errors are not logged in the mount logs on the Slave; they are post-processed and only genuine errors are logged in the Master logs.

+

Fixes: 1207115, +1210562

+

Improved rm -rf issues handling

+

Successive deletes and creates had issues; the handling of these issues is improved and they are minimized. (Not completely fixed, since it depends on open issues of DHT.)

+

Fixes: 1211037

+

Non root Geo-replication simplified

+

Manual editing of the Glusterd vol file is simplified by introducing the gluster system:: mountbroker command.
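As a rough sketch, assuming the mountbroker setup flow from the administration guide (geoaccount and slavevol are placeholder names; verify the exact sub-command syntax against the admin guide for your version):

    gluster system:: execute mountbroker opt mountbroker-root /var/mountbroker-root
    gluster system:: execute mountbroker user geoaccount slavevol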

+

Fixes: 1136312

+

Logging Rsync performance on request basis

+

Rsync performance can be evaluated by enabling a config option. After this, Geo-rep starts recording rsync performance in the log file, which can be post-processed to get meaningful metrics.
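A sketch of enabling this (the option name log-rsync-performance is an assumption here; session names are placeholders):

    gluster volume geo-replication MASTERVOL SLAVEHOST::SLAVEVOL config log-rsync-performance true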

+

Fixes: 764827

+

Initial sync issues due to upper limit comparison during Filesystem Crawl

+

Bug fix: fixed wrong logic in XSync change detection. An upper limit was considered during the XSync crawl, so Geo-rep XSync was missing many files, assuming that Changelog would take care of them. But Changelog will not have complete details of the files created before enabling Geo-replication.

+

When rsync/tarssh fails, geo-rep is now capable of identifying safe errors and continuing to sync by ignoring those issues. For example, rsync fails to sync a file which is deleted on the master during sync. This can be ignored, since the file is unlinked and there is no need to try syncing it.

+

Fixes: 1200733

+

Changelog failures and Brick failures handling

+

When a brick process went down, or on any Changelog exception, the Geo-rep worker was falling back to the XSync crawl. This was bad, since XSync fails to identify deletes and renames. Now this is prevented; the worker goes to Faulty and waits for that brick process to come back.

+

Fixes: 1202649

+

Archive Changelogs in working directory after processing

+

Changelogs are archived in the working directory after processing, and empty changelogs are not generated when no data is available. This is a great improvement in terms of reducing inode consumption on the brick.

+

Fixes: 1169331

+

Virtual xattr to trigger sync

+

Since we use Historical Changelogs when the Geo-rep worker restarts, only SETATTR will be recorded when we touch a file. In previous versions, re-triggering a file sync meant stopping geo-rep, touching the files, and starting geo-replication again. Now touch will not help, since it records only SETATTR. A virtual xattr is introduced to re-trigger the sync; no Geo-rep restart is required.
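As an illustration (the xattr name glusterfs.geo-rep.trigger-sync is assumed from the geo-replication troubleshooting documentation; the path is a placeholder), a sync can be re-triggered from the master mount with:

    setfattr -n glusterfs.geo-rep.trigger-sync -v "1" /mnt/mastervol/path/to/file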

+

Fixes: 1176934

+

SSH Keys overwrite issues during Geo-rep create

+

Parallel creates or multiple Geo-rep session creations were overwriting the pem keys written by the first one. This led to connectivity issues when Geo-rep was started.

+

Fixes: 1183229

+

Ownership sync improvements

+

Geo-rep was failing to sync ownership information from master cluster +to Slave cluster.

+

Fixes: 1104954

+

Slave node failover handling improvements

+

When slave node goes down, Master worker which is connected to that +brick will go to faulty. Now it tries to connect to another slave node +instead of waiting for that Slave node to come back.

+

Fixes: 1151412

+

Support of ssh keys custom location

+

If ssh authorized_keys are configured in a non-standard location instead of the default $HOME/.ssh/authorized_keys, Geo-rep create was failing; now this is supported.

+

Fixes: 1181117

\ No newline at end of file
diff --git a/release-notes/glusterfs-selinux2.0.1/index.html b/release-notes/glusterfs-selinux2.0.1/index.html
new file mode 100644
index 00000000..a33f3aad
--- /dev/null
+++ b/release-notes/glusterfs-selinux2.0.1/index.html
@@ -0,0 +1,4456 @@

Release notes for glusterfs-selinux 2.0.1

+

This is a bugfix and improvement release.

+

Important fixes in this release

+
    +
  • #rhbz1955415 glusterfs-selinux package should own the files created by it
  • +
  • #20 Fixing verification failure for ghost
  • +
  • #rhbz1779052 Adds rule to allow glusterd to access RDMA socket
  • +
+

Issues addressed in this release

+
    +
  • #rhbz1955415 glusterfs-selinux package should own the files created by it
  • +
  • #22 Fixed mixed use of tabs and spaces (rpmlint warning)
  • +
  • #20 Fixing verification failure for ghost file
  • +
  • #rhbz1779052 Adds rule to allow glusterd to access RDMA socket
  • +
  • #15 Modifying the path provided for glustereventsd.py
  • +
\ No newline at end of file
diff --git a/release-notes/index.html b/release-notes/index.html
new file mode 100644
index 00000000..fad459d6
--- /dev/null
+++ b/release-notes/index.html
@@ -0,0 +1,4916 @@

index

+ +

Gluster releases are separated into major and minor releases. Major releases typically contain newer functionality (in addition to bug fixes) and minor releases improve the stability of a major releases by providing bug fixes that are found or reported against them.

+

Major releases are made once every 1 year and receive minor updates for the next 12 months, after which they are no longer maintained (or termed EOL (End-Of-Life)).

+

NOTE:

+

From Gluster 10 major release, the release cycle for major releases is changed from 6 months to 1 year. Minor releases will follow every alternate month for a period of 12 months.

+

Likewise, minor releases of the previous major version will happen every three months.

+

Detailed release schedule here

+

Release Notes

+

GlusterFS seLinux release notes

+ +

GlusterFS 11 release notes

+ +

GlusterFS 10 release notes

+ +

GlusterFS 9 release notes

+ +

GlusterFS 8 release notes

+ +

GlusterFS 7 release notes

+ +

GlusterFS 6 release notes

+ +

GlusterFS 5 release notes

+ +

GlusterFS 4.1 release notes

+ +

GlusterFS 4.0 release notes

+ +

GlusterFS 3.13 release notes

+ +

GlusterFS 3.12 release notes

+ +

GlusterFS 3.11 release notes

+ +

GlusterFS 3.10 release notes

+ +

GlusterFS 3.9 release notes

+ +

GlusterFS 3.7 release notes

+ +

GlusterFS 3.6 release notes

+ +

GlusterFS 3.5 release notes

+ + + + + + + +
\ No newline at end of file
diff --git a/search/search_index.json b/search/search_index.json
new file mode 100644
index 00000000..273ed077
--- /dev/null
+++ b/search/search_index.json
@@ -0,0 +1 @@
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"GlusterFS Documentation","text":"

GlusterFS is a scalable network filesystem suitable for data-intensive tasks such as cloud storage and media streaming. GlusterFS is free and open source software and can utilize common off-the-shelf hardware. To learn more, please see the Gluster project home page.

Get Started: Quick Start/Installation Guide

Since Gluster can be used in different ways and for different tasks, it would be difficult to explain everything at once. We recommend that you follow the Quick Start Guide first. By utilizing a number of virtual machines, you will create a functional test setup to learn the basic concepts. You will then be much better equipped to read the more detailed Install Guide.

  • Quick Start Guide - Start here if you are new to Gluster!

  • Installation Guides describe the prerequisites and provide step-by-step instructions to install GlusterFS on various operating systems.

  • Presentations related to Gluster from Conferences and summits.

More Documentation

  • Administration Guide - describes the configuration and management of GlusterFS.

  • GlusterFS Developer Guide - describes how you can contribute to this open source project; built through the efforts of its dedicated, passionate community.

  • Upgrade Guide - if you need to upgrade from an older version of GlusterFS.

  • Release Notes - Glusterfs Release Notes provides high-level insight into the improvements and additions that have been implemented in various Glusterfs releases.

  • GlusterFS Tools - Guides for GlusterFS tools.

  • Troubleshooting Guide - Guide for troubleshooting.

How to Contribute?

The Gluster documentation has its home on GitHub, and the easiest way to contribute is to use the \"Edit on GitHub\" link on the top right corner of each page. If you already have a GitHub account, you can simply edit the document in your browser, use the preview tab, and submit your changes for review in a pull request.

If you want to help more with Gluster documentation, please subscribe to the Gluster Users and Gluster Developers mailing lists, and share your ideas with the Gluster developer community.

"},{"location":"glossary/","title":"Glossary","text":"

Access Control Lists : Access Control Lists (ACLs) allow you to assign different permissions for different users or groups even though they do not correspond to the original owner or the owning group.

Block Storage : Block special files, or block devices, correspond to devices through which the system moves data in the form of blocks. These device nodes often represent addressable devices such as hard disks, CD-ROM drives, or memory regions. GlusterFS requires a filesystem (like XFS) that supports extended attributes.

Brick : A Brick is the basic unit of storage in GlusterFS, represented by an export directory on a server in the trusted storage pool. A brick is expressed by combining a server with an export directory in the following format:

SERVER:EXPORT\nFor example:\nmyhostname:/exports/myexportdir/\n

Client : Any machine that mounts a GlusterFS volume. Any applications that use libgfapi access mechanism can also be treated as clients in GlusterFS context.

Cluster : A trusted pool of linked computers working together, resembling a single computing resource. In GlusterFS, a cluster is also referred to as a trusted storage pool.

Distributed File System : A file system that allows multiple clients to concurrently access data which is spread across servers/bricks in a trusted storage pool. Data sharing among multiple locations is fundamental to all distributed file systems.

Extended Attributes : Extended file attributes (abbreviated xattr) is a filesystem feature that enables users/programs to associate files/dirs with metadata. Gluster stores metadata in xattrs.

Filesystem : A method of storing and organizing computer files and their data. Essentially, it organizes these files into a database for the storage, organization, manipulation, and retrieval by the computer's operating system.

Source: Wikipedia

FUSE : Filesystem in Userspace (FUSE) is a loadable kernel module for Unix-like computer operating systems that lets non-privileged users create their own file systems without editing kernel code. This is achieved by running file system code in user space while the FUSE module provides only a \"bridge\" to the actual kernel interfaces. Source: Wikipedia

GFID : Each file/directory on a GlusterFS volume has a unique 128-bit number associated with it called the GFID. This is analogous to inode in a regular filesystem.
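
On a brick, the GFID of a file can be inspected through its xattr; the brick path below is only an example:

getfattr -n trusted.gfid -e hex /data/glusterfs/myvol1/brick1/brick/file.txt\n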

glusterd : The Gluster daemon/service that manages volumes and cluster membership. It is required to run on all the servers in the trusted storage pool.

Geo-Replication : Geo-replication provides a continuous, asynchronous, and incremental replication service from one site to another over Local Area Networks (LANs), Wide Area Networks (WANs), and across the Internet.

InfiniBand : InfiniBand is a switched fabric computer network communications link used in high-performance computing and enterprise data centers.

Metadata : Metadata is defined as data providing information about one or more other pieces of data. There is no special metadata storage concept in GlusterFS. The metadata is stored with the file data itself, usually in the form of extended attributes.

Namespace : A namespace is an abstract container or environment created to hold a logical grouping of unique identifiers or symbols. Each Gluster volume exposes a single namespace as a POSIX mount point that contains every file in the cluster.

Node : A server or computer that hosts one or more bricks.

N-way Replication : Local synchronous data replication which is typically deployed across campus or Amazon Web Services Availability Zones.

Petabyte : A petabyte (derived from the SI prefix peta- ) is a unit of information equal to one quadrillion (short scale) bytes, or 1000 terabytes. The unit symbol for the petabyte is PB. The prefix peta- (P) indicates a power of 1000:

1 PB = 1,000,000,000,000,000 B = 1000^5 B = 10^15 B.\n\nThe term \"pebibyte\" (PiB), using a binary prefix, is used for the\ncorresponding power of 1024.\n

Source: Wikipedia

POSIX : Portable Operating System Interface (for Unix) is the name of a family of related standards specified by the IEEE to define the application programming interface (API), along with shell and utilities interfaces, for software compatible with variants of the Unix operating system. Gluster exports a POSIX-compatible file system.

Quorum : The configuration of quorum in a trusted storage pool determines the number of server failures that the trusted storage pool can sustain. If an additional failure occurs, the trusted storage pool becomes unavailable.
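
As a rough sketch, server-side quorum is configured through volume options such as the following; the option names are real, but the values shown are only illustrative:

gluster volume set <VOLNAME> cluster.server-quorum-type server\ngluster volume set all cluster.server-quorum-ratio 51%\n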

Quota : Quota allows you to set limits on usage of disk space by directories or by volumes.
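
For example, enabling quota and limiting a directory's usage looks like this; the volume name, directory and limit are placeholders:

gluster volume quota <VOLNAME> enable\ngluster volume quota <VOLNAME> limit-usage /docs 10GB\n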

RAID : Redundant Array of Inexpensive Disks (RAID) is a technology that provides increased storage reliability through redundancy, combining multiple low-cost, less-reliable disk drive components into a logical unit where all drives in the array are interdependent.

RDMA : Remote direct memory access (RDMA) is a direct memory access from the memory of one computer into that of another without involving either one's operating system. This permits high-throughput, low-latency networking, which is especially useful in massively parallel computer clusters.

Rebalance : The process of redistributing data in a distributed volume when a brick is added or removed.
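
For example, a rebalance is typically started and monitored with the following commands; the volume name is a placeholder:

gluster volume rebalance <VOLNAME> start\ngluster volume rebalance <VOLNAME> status\n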

RRDNS : Round Robin Domain Name Service (RRDNS) is a method to distribute load across application servers. It is implemented by creating multiple A records with the same name and different IP addresses in the zone file of a DNS server.

Samba : Samba allows file and print sharing between computers running Windows and computers running Linux. It is an implementation of several services and protocols including SMB and CIFS.

Scale-Up Storage : Increases the capacity of the storage device in a single dimension. For example, adding additional disk capacity to an existing trusted storage pool.

Scale-Out Storage : Scale-out systems are designed to scale in both capacity and performance, rather than increasing the capability of a storage device in a single dimension. For example, adding more systems of the same size, or adding servers to a trusted storage pool, increases the CPU, disk capacity, and throughput of the trusted storage pool.

Self-Heal : The self-heal daemon runs in the background, identifies inconsistencies in files/dirs in a replicated or erasure-coded volume, and then resolves or heals them. This healing process is usually required when one or more bricks of a volume go down and then come up later.

Server : The machine (virtual or bare metal) that hosts the bricks in which data is stored.

Split-brain : A situation where data on two or more bricks in a replicated volume start to diverge in terms of content or metadata. In this state, one cannot determine programmatically which set of data is \"right\" and which is \"wrong\".

Subvolume : A brick after being processed by at least one translator.

Translator : Translators (also called xlators) are stackable modules where each module has a very specific purpose. Translators are stacked in a hierarchical structure called a graph. A translator receives data from its parent translator, performs the necessary operations, and then passes the data down to its child translator in the hierarchy.

Trusted Storage Pool : A storage pool is a trusted network of storage servers. When you start the first server, the storage pool consists of that server alone.
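
For example, servers are added to and listed in the pool with the peer commands; the hostname below is a placeholder:

gluster peer probe server2\ngluster peer status\n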

Userspace : Applications running in user space don\u2019t directly interact with hardware, instead using the kernel to moderate access. Userspace applications are generally more portable than applications in kernel space. Gluster is a user space application.

Virtual File System (VFS) : VFS is a kernel software layer which handles all system calls related to the standard Linux file system. It provides a common interface to several kinds of file systems.

Volume : A volume is a logical collection of bricks.

Vol file : Vol files or volume (.vol) files are configuration files that determine the behavior of the Gluster trusted storage pool. It is a textual representation of a collection of modules (also known as translators) that together implement the various functions required.

"},{"location":"security/","title":"Security","text":"

This document is to be considered a \"work in progress\" until this message is removed.

Reporting security issues

Please report any security issues you find in Gluster projects to: security at gluster.org

Anyone can post to this list. The subscribers are only trusted individuals who will handle the resolution of any reported security issues in confidence. In your report, please note how you would like to be credited for discovering the issue and the details of any embargo you would like to impose.

[need to check if this holds]

Currently, the security response teams for the following distributions are subscribed to this list and will respond to your report:

  • Fedora
  • Red Hat

Handling security issues

If you represent a Gluster project or a distribution which packages Gluster projects, you are welcome to subscribe to the security at gluster.org mailing list. Your subscription will only be approved if you can demonstrate that you will handle issues in confidence and properly credit reporters for discovering issues. A second mailing list exists for discussion of embargoed security issues:

security-private at gluster.org

You will be invited to subscribe to this list if you are subscribed to security at gluster.org.

Security advisories

The security advisories page lists all security vulnerabilities fixed in Gluster.

[need to check if this holds]

"},{"location":"Administrator-Guide/","title":"Administration Guide","text":"
  1. Managing a Cluster

    • Managing the Gluster Service
    • Managing Trusted Storage Pools
  2. Setting Up Storage

    • Brick Naming Conventions
    • Formatting and Mounting Bricks
    • POSIX Access Control Lists
  3. Setting Up Clients

    • Handling of users that belong to many groups
  4. Volumes

    • Setting Up Volumes
    • Managing Volumes
    • Modifying .vol files with a filter
  5. Configuring NFS-Ganesha

  6. Features

    • Replication
    • Geo Replication
    • Quotas
    • Snapshots
    • Trash
    • io_uring
  7. Data Access With Other Interfaces

    • Managing Object Store
    • Accessing GlusterFS using Cinder Hosts
    • GlusterFS with Keystone
    • Install Gluster on Top of ZFS
    • Configuring Bareos to store backups on Gluster
  8. GlusterFS Service Logs and Locations

  9. Monitoring Workload

  10. Securing GlusterFS Communication using SSL

  11. Puppet Gluster

  12. RDMA Transport

  13. GlusterFS iSCSI

  14. Linux Kernel Tuning

  15. Export and Netgroup Authentication

  16. Thin Arbiter volumes

  17. Trash for GlusterFS

  18. Split brain and ways to deal with it

  19. Arbiter volumes and quorum options

  20. Mandatory Locks

  21. GlusterFS coreutilities

  22. Events APIs

  23. Building QEMU With gfapi For Debian Based Systems

  24. Appendices

    • Network Configuration Techniques
    • Performance Testing
  25. Tuning Volume Options
"},{"location":"Administrator-Guide/Access-Control-Lists/","title":"POSIX Access Control Lists","text":"

POSIX Access Control Lists (ACLs) allow you to assign different permissions for different users or groups even though they do not correspond to the original owner or the owning group.

For example: User john creates a file but does not want to allow anyone to do anything with this file, except another user, antony (even though there are other users that belong to the group john).

This means, in addition to the file owner, the file group, and others, additional users and groups can be granted or denied access by using POSIX ACLs.

"},{"location":"Administrator-Guide/Access-Control-Lists/#activating-posix-acls-support","title":"Activating POSIX ACLs Support","text":"

To use POSIX ACLs for a file or directory, the partition of the file or directory must be mounted with POSIX ACLs support.

"},{"location":"Administrator-Guide/Access-Control-Lists/#activating-posix-acls-support-on-server","title":"Activating POSIX ACLs Support on Server","text":"

To mount the backend export directories for POSIX ACLs support, use the following command:

mount -o acl <device-name> <mount-point>\n

For example:

mount -o acl /dev/sda1 /export1\n

Alternatively, if the partition is listed in the /etc/fstab file, add the following entry for the partition to include the POSIX ACLs option:

LABEL=/work /export1 ext3 rw,acl 1 2\n
"},{"location":"Administrator-Guide/Access-Control-Lists/#activating-posix-acls-support-on-client","title":"Activating POSIX ACLs Support on Client","text":"

To mount the glusterfs volumes for POSIX ACLs support, use the following command:

mount -t glusterfs -o acl <server>:<volume> <mount-point>\n

For example:

mount -t glusterfs -o acl 198.192.198.234:glustervolume /mnt/gluster\n
"},{"location":"Administrator-Guide/Access-Control-Lists/#setting-posix-acls","title":"Setting POSIX ACLs","text":"

You can set two types of POSIX ACLs, that is, access ACLs and default ACLs. You can use access ACLs to grant permission for a specific file or directory. You can use default ACLs only on a directory, but if a file inside that directory does not have an ACL, it inherits the permissions of the default ACLs of the directory.

You can set ACLs per user, per group, for users not in the user group for the file, and via the effective rights mask.

"},{"location":"Administrator-Guide/Access-Control-Lists/#setting-access-acls","title":"Setting Access ACLs","text":"

You can apply access ACLs to grant permission for both files and directories.

To set or modify Access ACLs

You can set or modify access ACLs using the following command:

setfacl -m <entry_type> <file>\n

The ACL entry types are the POSIX ACLs representations of owner, group, and other.

Permissions must be a combination of the characters r (read), w (write), and x (execute). You must specify the ACL entry in the following format and can specify multiple entry types separated by commas.

  • u:uid:\<permission> - Sets the access ACLs for a user. You can specify the user name or UID.
  • g:gid:\<permission> - Sets the access ACLs for a group. You can specify the group name or GID.
  • m:\<permission> - Sets the effective rights mask. The mask is the combination of all access permissions of the owning group and all of the user and group entries.
  • o:\<permission> - Sets the access ACLs for users other than the ones in the group for the file.

If a file or directory already has a POSIX ACLs, and the setfacl command is used, the additional permissions are added to the existing POSIX ACLs or the existing rule is modified.

For example, to give read and write permissions to user antony:

setfacl -m u:antony:rw /mnt/gluster/data/testfile\n
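
Multiple entry types can also be combined in a single invocation, separated by commas; the group name staff below is only an example:

setfacl -m u:antony:rw,g:staff:r,o::r /mnt/gluster/data/testfile\n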
"},{"location":"Administrator-Guide/Access-Control-Lists/#setting-default-acls","title":"Setting Default ACLs","text":"

You can apply default ACLs only to directories. They determine the permissions that a file system object inherits from its parent directory when it is created.

To set default ACLs

You can set default ACLs for a directory using the following command:

setfacl -d -m <entry_type> <directory>\n

Permissions must be a combination of the characters r (read), w (write), and x (execute). Specify the ACL entry_type as described below, separating multiple entry types with commas.

u:user_name:permissions Sets the access ACLs for a user. Specify the user name, or the UID.

g:group_name:permissions Sets the access ACLs for a group. Specify the group name, or the GID.

m:permission Sets the effective rights mask. The mask is the combination of all access permissions of the owning group, and all user and group entries.

o:permissions Sets the access ACLs for users other than the ones in the group for the file.

For example, to set the default ACLs for the /data directory to read for users not in the user group:

setfacl -d -m o::r /mnt/gluster/data\n

Note

An access ACL set for an individual file can override the default ACL permissions.

Effects of a Default ACLs

The following are the ways in which the permissions of a directory's default ACLs are passed to the files and subdirectories in it:

  • A subdirectory inherits the default ACLs of the parent directory both as its default ACLs and as an access ACL.
  • A file inherits the default ACLs as its access ACL, as illustrated in the example below.
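
A short illustration of this inheritance, reusing the paths and user from the examples above (the exact getfacl output will depend on the creating process and the effective mask):

setfacl -d -m u:antony:rwx /mnt/gluster/data\nmkdir /mnt/gluster/data/newdir    # gets the default ACLs as both default and access ACLs\ntouch /mnt/gluster/data/newfile   # gets the default ACLs as access ACLs only\ngetfacl /mnt/gluster/data/newdir\ngetfacl /mnt/gluster/data/newfile\n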
"},{"location":"Administrator-Guide/Access-Control-Lists/#retrieving-posix-acls","title":"Retrieving POSIX ACLs","text":"

You can view the existing POSIX ACLs for a file or directory.

To view existing POSIX ACLs

  • View the existing access ACLs of a file using the following command:
getfacl <path-to-file>\n

For example, to view the existing POSIX ACLs for sample.jpg

getfacl /mnt/gluster/data/test/sample.jpg\n
owner: antony\ngroup: antony\nuser::rw-\ngroup::rw-\nother::r--\n
  • View the default ACLs of a directory using the following command:
getfacl <directory>\n

For example, to view the existing ACLs for /data/doc

getfacl /mnt/gluster/data/doc\n
owner: antony\ngroup: antony\nuser::rw-\nuser:john:r--\ngroup::r--\nmask::r--\nother::r--\ndefault:user::rwx\ndefault:user:antony:rwx\ndefault:group::r-x\ndefault:mask::rwx\ndefault:other::r-x\n
"},{"location":"Administrator-Guide/Access-Control-Lists/#removing-posix-acls","title":"Removing POSIX ACLs","text":"

To remove all the permissions for a user, groups, or others, use the following command:

setfacl -x <entry_type> <file>\n
"},{"location":"Administrator-Guide/Access-Control-Lists/#setfaclentry_type-options","title":"setfaclentry_type Options","text":"

The ACL entry_type translates to the POSIX ACL representations of owner, group, and other.

Permissions must be a combination of the characters r (read), w (write), and x (execute). Specify the ACL entry_type as described below, separating multiple entry types with commas.

u:user_name Sets the access ACLs for a user. Specify the user name, or the UID.

g:group_name Sets the access ACLs for a group. Specify the group name, or the GID.

m:permission Sets the effective rights mask. The mask is the combination of all access permissions of the owning group, and all user and group entries.

o:permissions Sets the access ACLs for users other than the ones in the group for the file.

For example, to remove all permissions from the user antony:

setfacl -x u:antony /mnt/gluster/data/test-file\n
"},{"location":"Administrator-Guide/Access-Control-Lists/#samba-and-acls","title":"Samba and ACLs","text":"

If you are using Samba to access a GlusterFS FUSE mount, then POSIX ACLs are enabled by default. Samba has been compiled with the --with-acl-support option, so no special flags are required when accessing or mounting a Samba share.

"},{"location":"Administrator-Guide/Access-Control-Lists/#nfs-and-acls","title":"NFS and ACLs","text":"

Currently GlusterFS supports POSIX ACL configuration through NFS mount, i.e. setfacl and getfacl commands work through NFS mount.

"},{"location":"Administrator-Guide/Accessing-Gluster-from-Windows/","title":"Accessing Gluster volume via SMB Protocol","text":"

The layered product Samba is used to export the Gluster volume, and ctdb provides high availability for Samba. Here are the steps to configure a highly available Samba cluster to export a Gluster volume.

Note: These configuration steps are applicable to Samba version = 4.1, Gluster version >= 3.7 and ctdb version >= 2.5.

"},{"location":"Administrator-Guide/Accessing-Gluster-from-Windows/#step-1-choose-the-servers-that-will-export-the-gluster-volume","title":"Step 1: Choose the servers that will export the Gluster volume.","text":"

The servers may or may not be part of the trusted storage pool. The preferred number of servers is <= 4. Install the Samba and ctdb packages on these servers.
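
Package names differ between distributions; on a CentOS 7 system the installation would typically be something like:

yum install samba samba-vfs-glusterfs ctdb\n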

"},{"location":"Administrator-Guide/Accessing-Gluster-from-Windows/#step-2-enabledisable-the-auto-export-of-gluster-volume-via-smb","title":"Step 2: Enable/Disable the auto export of Gluster volume via SMB","text":"
# gluster volume set VOLNAME user.smb disable/enable\n
"},{"location":"Administrator-Guide/Accessing-Gluster-from-Windows/#step-3-setup-the-ctdb-cluster","title":"Step 3: Setup the CTDB Cluster:","text":"
  1. Create a ctdb meta volume with replica N, N being the number of the servers that are used as Samba servers. This volume will host only a zero byte lock file, hence choose the minimal sized bricks. To create the n replica volume run the following command:

     # gluster volume create <volname> replica n <ipaddr/host name>:/<brick_patch>.... N times\n
  2. In the following files, replace \"all\" in the statement \"META=all\" with the newly created volume name.

     /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh\n /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh\n
  3. Start the ctdb volume

    # gluster vol start <volname>\n
  4. Verify the following:

    • If the following lines are added in the smb.conf file on all the nodes running samba/ctdb:

      clustering = yes\nidmap backend = tdb2\n
    • If the ctdb volume is mounted at /gluster/lock on all the nodes that run ctdb/samba

    • If the mount entry for the ctdb volume is added in /etc/fstab
    • If the file /etc/sysconfig/ctdb exists on all the nodes that run ctdb/samba
  5. Create the /etc/ctdb/nodes file on all the nodes that run ctdb/samba, and add the IPs of all these nodes in the file. For example,

     # cat /etc/ctdb/nodes\n 10.16.157.0\n 10.16.157.3\n 10.16.157.6\n 10.16.157.8\n

    The IPs listed here are the private IPs of Samba/ctdb servers, which should be a private non-routable subnet and are only used for internal cluster traffic. For more details refer to the ctdb man page.

  6. Create the /etc/ctdb/public_addresses file on all the nodes that run ctdb/samba, and add the virtual IPs in the following format:

     <virtual IP><routing prefix> <node interface>\n

    Eg:

     # cat /etc/ctdb/public_addresses\n 192.168.1.20/24 eth0\n 192.168.1.21/24 eth0\n
  7. Either uncomment CTDB_SAMBA_SKIP_SHARE_CHECK=yes, or add it if absent, inside /etc/ctdb/script.options to disable checking of the shares by ctdb.

  8. If SELinux is enabled and enforcing, try the following commands if ctdb fails.

     # setsebool -P use_fusefs_home_dirs 1\n # setsebool -P samba_load_libgfapi 1\n
"},{"location":"Administrator-Guide/Accessing-Gluster-from-Windows/#step-4-performance-tunings-before-exporting-the-volume","title":"Step 4: Performance tunings before exporting the volume","text":"
  1. To ensure lock and IO coherency:

    # gluster volume set VOLNAME storage.batch-fsync-delay-usec 0\n
  2. If using a Samba 4.X version, add the following lines to smb.conf in the global section:

     kernel share modes = no\n kernel oplocks = no\n map archive = no\n map hidden = no\n map read only = no\n map system = no\n store dos attributes = yes\n

    Note: Setting 'store dos attributes = no' is recommended if archive/hidden/read-only dos attributes are not used. This can give better performance.

  3. If you are using gluster5 or higher execute the following to improve performance:

     # gluster volume set VOLNAME group samba\n

    On older versions, please execute the following:

     # gluster volume set VOLNAME features.cache-invalidation on\n # gluster volume set VOLNAME features.cache-invalidation-timeout 600\n # gluster volume set VOLNAME performance.cache-samba-metadata on\n # gluster volume set VOLNAME performance.stat-prefetch on\n # gluster volume set VOLNAME performance.cache-invalidation on\n # gluster volume set VOLNAME performance.md-cache-timeout 600\n # gluster volume set VOLNAME network.inode-lru-limit 200000\n # gluster volume set VOLNAME performance.nl-cache on\n # gluster volume set VOLNAME performance.nl-cache-timeout 600\n # gluster volume set VOLNAME performance.readdir-ahead on\n # gluster volume set VOLNAME performance.parallel-readdir on\n
  4. Tune the number of threads in gluster for better performance:

    # gluster volume set VOLNAME client.event-threads 4\n# gluster volume set VOLNAME server.event-threads 4 # Increasing to a very high value will reduce the performance\n
"},{"location":"Administrator-Guide/Accessing-Gluster-from-Windows/#step-5-mount-the-volume-using-smb","title":"Step 5: Mount the volume using SMB","text":"
  1. If there is no Active Directory setup, add the user on all the Samba servers and set the password:

     # adduser USERNAME\n # smbpasswd -a USERNAME\n
  2. Start the ctdb, smb and other related services:

    # systemctl re/start ctdb\n# ctdb status\n# ctdb ip\n# ctdb ping -n all\n
  3. To verify if the volume exported by samba can be accessed by a user:

     # smbclient //<hostname>/gluster-<volname> -U <username>%<password>\n
  4. To mount on a linux system:

     # mount -t cifs -o user=<username>,pass=<password> //<Virtual IP>/gluster-<volname> /<mountpoint>\n

    To mount on Windows system:

     >net use <device:> \\\\<Virtual IP>\\gluster-<volname>\n

    OR

     \\\\<Virtual IP>\\gluster-<volname>\n

    from windows explorer.

"},{"location":"Administrator-Guide/Automatic-File-Replication/","title":"Replication","text":"

This doc contains information about the synchronous replication module in gluster and has two sections

  • Replication logic
  • Self-heal logic.
"},{"location":"Administrator-Guide/Automatic-File-Replication/#1-replication-logic","title":"1. Replication logic","text":"

AFR is the module (translator) in glusterfs that provides all the features that you would expect of any synchronous replication system:

  1. Simultaneous updating of all copies of data on the replica bricks when a client modifies it.
  2. Providing continued data availability to clients when say one brick of the replica set goes down.
  3. Automatic self-healing of any data that was modified when the brick that was down, once it comes back up, ensuring consistency of data on all the bricks of the replica.

1 and 2 are in the I/O path while 3 is done either in the I/O path (in the background) or via the self-heal daemon.

Each gluster translator implements what are known as File Operations (FOPs) which are mapped to the I/O syscalls which the application makes. For example, AFR has afr_writev that gets invoked when application does a write(2). As is obvious, all FOPs fall into one of 2 types:

i) Read-based FOPs which only get information from and don\u2019t modify the file in any way.

viz: afr_readdir, afr_access, afr_stat, afr_fstat, afr_readlink, afr_getxattr, afr_fgetxattr, afr_readv,afr_seek

ii) Write-based FOPs which change the file or its attributes.

viz: afr_create, afr_mknod,afr_mkdir,afr_link, afr_symlink, afr_rename, afr_unlink, afr_rmdir, afr_do_writev, afr_truncate, afr_ftruncate, afr_setattr, afr_fsetattr, afr_setxattr, afr_fsetxattr, afr_removexattr, afr_fremovexattr, afr_fallocate, afr_discard, afr_zerofill, afr_xattrop, afr_fxattrop, afr_fsync.

AFR follows a transaction model for both types of FOPs.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#read-transactions","title":"Read transactions:","text":"

For every file in the replica, AFR has an in-memory notion/array called \u2018readables\u2019 which indicate whether each brick of the replica is a good copy or a bad one (i.e. in need of a heal). In a healthy state, all bricks are readable and a read FOP will be served from any one of the readable bricks. The read-hash-mode volume option decides which brick is the chosen one.

gluster volume set help | grep read-hash-mode -A7\n
Option: cluster.read-hash-mode\nDefault Value: 1\nDescription: inode-read fops happen only on one of the bricks in replicate. AFR will prefer the one computed using the method specified using this option.\n0 = first readable child of AFR, starting from 1st child.\n1 = hash by GFID of file (all clients use same subvolume).\n2 = hash by GFID of file and client PID.\n3 = brick having the least outstanding read requests.\n

If the brick is bad for a given file (i.e. it is pending heal), then it won\u2019t be marked readable to begin with. The readables array is populated based on the on-disk AFR xattrs for the file during lookup. These xattrs indicate which bricks are good and which ones are bad. We will see more about these xattrs in the write transactions section below. If the FOP fails on the chosen readable brick, AFR attempts it on the next readable one, until all are exhausted. If the FOP doesn\u2019t succeed on any of the readables, then the application receives an error.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#write-transactions","title":"Write transactions:","text":"

Every write based FOP employs a write transaction model which consists of 5 phases:

1) The lock phase Take locks on the file being modified on all bricks so that AFRs of other clients are blocked if they try to modify the same file simultaneously.

2) The pre-op phase Increment the \u2018dirty\u2019 xattr (trusted.afr.dirty) by 1 on all participating bricks as an indication of an impending FOP (in the next phase)

3) The FOP phase Perform the actual FOP (say a setfattr) on all bricks.

4) The post-op phase Decrement the dirty xattr by 1 on bricks where the FOP was successful. In addition, also increment the \u2018pending\u2019 xattr (trusted.afr.$VOLNAME-client-x) xattr on the success bricks to \u2018blame\u2019 the bricks where the FOP failed.

5) The unlock phase Release the locks that were taken in phase 1. Any competing client can now go ahead with its own write transaction.

Note: There are certain optimizations done at the code level which reduce the number of lock/unlock phases done for a transaction by piggybacking on the previous transaction\u2019s locks. These optimizations (eager-locking, piggybacking and delayed post-op) are beyond the scope of this post.

AFR returns success for these FOPs only if they meet quorum. For replica 2, this means it needs to succeed on any one brick. For replica 3, it is two out of three, and so on.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#more-on-the-afr-xattrs","title":"More on the AFR xattrs:","text":"

We saw that AFR modifies the dirty and pending xattrs in the pre-op and post-op phases. To be more precise, only parts of the xattr are modified in a given transaction. Which bytes are modified depends on the type of write transaction which the FOP belongs to.

Transaction type and the FOPs that belong to it:

  • AFR_DATA_TRANSACTION - afr_writev, afr_truncate, afr_ftruncate, afr_fsync, afr_fallocate, afr_discard, afr_zerofill
  • AFR_METADATA_TRANSACTION - afr_setattr, afr_fsetattr, afr_setxattr, afr_fsetxattr, afr_removexattr, afr_fremovexattr, afr_xattrop, afr_fxattrop
  • AFR_ENTRY_TRANSACTION - afr_create, afr_mknod, afr_mkdir, afr_link, afr_symlink, afr_rename, afr_unlink, afr_rmdir

Stop here and convince yourself that given a write based FOP, you can say which one of the 3 transaction types it belongs to.

Note: In the code, there is also an AFR_ENTRY_RENAME_TRANSACTION (used by afr_rename) but it is safe to assume that it is identical to AFR_ENTRY_TRANSACTION as far as interpreting the xattrs is concerned.

Consider the xattr: trusted.afr.dirty=0x000000000000000000000000. The first 4 bytes of the xattr are used for data transactions, the next 4 bytes for metadata transactions and the last 4 for entry transactions. Let us see some examples of how the xattr would look for various types of FOPs during a transaction:

  • afr_writev - after pre-op: trusted.afr.dirty=0x00000001 00000000 00000000; after post-op: trusted.afr.dirty=0x00000000 00000000 00000000
  • afr_setattr - after pre-op: trusted.afr.dirty=0x00000000 00000001 00000000; after post-op: trusted.afr.dirty=0x00000000 00000000 00000000
  • afr_create - after pre-op: trusted.afr.dirty=0x00000000 00000000 00000001; after post-op: trusted.afr.dirty=0x00000000 00000000 00000000

Thus, depending on the type of FOP (i.e. data/ metadata/ entry transaction), a different set of bytes of the dirty xattr gets incremented/ decremented. Modification of the pending xattr also follows the same pattern, except that it is incremented only in the post-op phase if the FOP fails on some bricks.

Example: Let us say a write was performed on a file, say FILE1, on replica 3 volume called \u2018testvol\u2019. Suppose the lock and pre-op phase succeeded on all bricks. After that the 3rd brick went down, and the transaction completed successfully on the first 2 bricks. What will be the state of the afr xattrs on all bricks?

# getfattr -d -m . -e hex /bricks/brick1/FILE1 | grep afr\ngetfattr: Removing leading '/' from absolute path names\ntrusted.afr.dirty=0x000000000000000000000000\ntrusted.afr.testvol-client-2=0x000000010000000000000000\n
# getfattr -d -m . -e hex /bricks/brick2/FILE1 | grep afr\ngetfattr: Removing leading '/' from absolute path names\ntrusted.afr.dirty=0x000000000000000000000000\ntrusted.afr.testvol-client-2=0x000000010000000000000000\n
# getfattr -d -m . -e hex /bricks/brick3/FILE1 | grep afr\ngetfattr: Removing leading '/' from absolute path names\ntrusted.afr.dirty=0x000000010000000000000000\n

So Brick3 will still have the dirty xattr set because it went down before the post-op had a chance to decrement it. Bricks 1 and 2 will have a zero dirty xattr and in addition, a non-zero pending xattr set. The client-2 in trusted.afr.testvol-client-2 indicates that the 3rd brick is bad and has some pending data operations.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#2-self-heal-logic","title":"2. Self-heal logic.","text":"

We already know that AFR increments and/or decrements the dirty (i.e. trusted.afr.dirty) and pending (i.e. trusted.afr.$VOLNAME-client-x) xattrs during the different phases of the transaction. For a given file (or directory), an all-zero value of these xattrs or the total absence of these xattrs on all bricks of the replica means the file is healthy and does not need heal. If any of these xattrs are non-zero even on one of the bricks, then the file is a candidate for heal - it is as simple as that.

When we say these xattrs are non-zero, it is in the context of no on-going I/O going from client(s) on the file. Otherwise the non-zero values that one observes might be transient as the write transaction is progressing through its five phases. Of course, as an admin, you wouldn\u2019t need to figure out all of this. Just running the heal info set of commands should give you the list of files that need heal.
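
For reference, the heal info family of commands looks like this; the volume name is a placeholder:

gluster volume heal <VOLNAME> info\ngluster volume heal <VOLNAME> info split-brain\n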

So if self-heal observes a file with non-zero xattrs, it does the following steps:

  1. Fetch the afr xattrs, examine which set of 8 bytes are non-zero and determine the corresponding heals that are needed on the file \u2013 i.e. data heal/ metadata heal/ entry heal.
  2. Determine which bricks are good (a.k.a. \u2018sources\u2019) and which ones are bad (a.k.a. \u2018sinks\u2019) for each of those heals by interpreting the xattr values.
  3. Pick one source brick and heal the file on to all the sink bricks.
  4. If the heal is successful, reset the afr xattrs to zero.

This is a rather simplified description and I have omitted details about various locks that each of these steps need to take because self-heal and client I/O can happen in parallel on the file. Or even multiple self-heal daemons (described later) can attempt to heal the same file.

Data heal: Happens only for files. The contents of the file are copied from the source to the sink bricks.

Entry heal: Happens only for directories. Entries (i.e. files and subdirs) under a given directory are deleted from the sinks if they are not present in the source. Likewise, entries are created on the sinks if they are present in the source.

Metadata heal: Happens for both files and directories. File ownership, file permissions and extended attributes are copied from the source to the sink bricks.

It can be possible that for a given file, one set of bricks can be the source for data heal while another set could be the source for metadata heals. It all depends on which FOPs failed on what bricks and therefore what set of bytes are non-zero for the afr xattrs.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#when-do-self-heals-happen","title":"When do self-heals happen?","text":"

There are two places from which the steps described above for healing can be carried out:

"},{"location":"Administrator-Guide/Automatic-File-Replication/#i-from-the-client-side","title":"i) From the client side.","text":"

Client-side heals are triggered when the file is accessed from the client (mount). AFR uses a monotonically increasing generation number to keep track of disconnect/connect of its children (i.e. the client translators) to the bricks. When this \u2018event generation\u2019 number changes, the file\u2019s inode is marked as a candidate for refresh. When the next FOP comes on such an inode, a refresh is triggered to update the readables during which a heal is launched (if the AFR xattrs indicate that a heal is needed, that is). This heal happens in the background, meaning it does not block the actual FOP which will continue as usual post the refresh. Specific client-side heals can be turned off by disabling the 3 corresponding volume options:

cluster.metadata-self-heal\ncluster.data-self-heal\ncluster.entry-self-heal\n

The number of client-side heals that happen in the background can be tuned via the following volume options:

background-self-heal-count\nheal-wait-queue-length\n

See the gluster volume set help for more information on all the above options.

Name heal: Name heal is just healing of the file/directory name when it is accessed. For example, say a file is created and written to when a brick is down and all the 3 client side heals are disabled. When the brick comes up and the next I/O comes on it, the file name is created on it as a part of lookup. Its contents/metadata are not healed though. Name heal cannot be disabled. It is there to ensure that the namespace is consistent on all bricks as soon as the file is accessed.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#ii-by-the-self-heal-daemon","title":"ii) By the self-heal daemon.","text":"

There is a self-heal daemon process (glustershd) that runs on every node of the trusted storage pool. It is a lightweight client process consisting mainly of AFR and the protocol/client translators. It can talk to all bricks of all the replicate volume(s) of the pool. It periodically crawls (every 10 minutes by default; tunable via the heal-timeout volume option) the list of files that need heal and does their healing. As you can see, client-side heal is done upon file access, but glustershd processes the heal backlog pro-actively.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#index-heal","title":"Index heal:","text":"

But how does glustershd know which files it needs to heal? Where does it get the list from? So in part-1, while we saw the five phases of the AFR write transaction, we left out one detail:

  • In the pre-op phase, in addition to marking the dirty xattr, each brick also stores the gfid string of the file inside its .glusterfs/indices/dirty directory.
  • Likewise, in the post-op phase, it removes the gfid string from its .glusterfs/indices/dirty directory. In addition, if the write failed on some brick, the good bricks will store the gfid string inside the .glusterfs/indices/xattrop directory.

Thus when no I/O is happening on a file and you still find its gfid inside .glusterfs/indices/dirty of a particular brick, it means the brick went down before the post-op phase. If you find the gfid inside .glusterfs/indices/xattrop, it means the write failed on some other brick and this brick has captured it.

The glustershd simply reads the list of entries inside .glusterfs/indices/* and triggers heal on them. This is referred to as index heal. While this happens automatically every heal-timeout seconds, we can also manually trigger it via the CLI using gluster volume heal $VOLNAME.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#full-heal","title":"Full heal:","text":"

A full heal, triggered from the CLI with gluster volume heal $VOLNAME full, does just what the name implies. It does not process a particular list of entries like index heal, but crawls the whole gluster filesystem beginning with root, examines if files have non zero afr xattrs and triggers heal on them.

"},{"location":"Administrator-Guide/Automatic-File-Replication/#of-missing-xattrs-and-split-brains","title":"Of missing xattrs and split-brains:","text":"

You might now realise how AFR pretty much relies on its xattr values of a given file- from using it to find the good copies to serve a read to finding out the source and sink bricks to heal the file. But what if there is inconsistency in data/metadata of a file and

(a) there are zero/ no AFR xattrs (or)

(b) if the xattrs all blame each other (i.e. no good copy=>split-brain)?

For (a), AFR uses heuristics like picking a local (to that specific glustershd process) brick, picking the bigger file, picking the file with the latest ctime, etc., and then does the heal.

For (b) you need to resort to using the gluster split-brain resolution CLI or setting the favorite-child-policy volume option to choose a good copy and trigger the heal.
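
As a sketch, the CLI-based resolution and the volume option look like the following; latest-mtime and mtime are just two of the available policies, and the file path is a placeholder:

gluster volume heal <VOLNAME> split-brain latest-mtime <path-to-file-on-volume>\ngluster volume set <VOLNAME> cluster.favorite-child-policy mtime\n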

"},{"location":"Administrator-Guide/Bareos/","title":"Configuring Bareos to store backups on Gluster","text":"

This description assumes that you already have a Gluster environment ready and configured. The examples use storage.example.org as a Round Robin DNS name that can be used to contact any of the available GlusterD processes. The Gluster Volume that is used, is called backups. Client systems would be able to access the volume by mounting it with FUSE like this:

mount -t glusterfs storage.example.org:/backups /mnt\n

Bareos contains a plugin for the Storage Daemon that uses libgfapi. This makes it possible for Bareos to access the Gluster Volumes without the need to have a FUSE mount available.

Here we will use one server that is dedicated for doing backups. This system is called backup.example.org. The Bareos Director is running on this host, together with the Bareos Storage Daemon. In the example, there is a File Daemon running on the same server. This makes it possible to backup the Bareos Director, which is useful as a backup of the Bareos database and configuration is kept that way.

"},{"location":"Administrator-Guide/Bareos/#bareos-installation","title":"Bareos Installation","text":"

An absolute minimal Bareos installation needs a Bareos Director and a Storage Daemon. In order to backup a filesystem, a File Daemon needs to be available too. For the description in this document, CentOS-7 was used, with the following packages and versions:

  • glusterfs-3.7.4
  • bareos-14.2 with bareos-storage-glusterfs

The Gluster Storage Servers do not need to have any Bareos packages installed. It is often better to keep applications (Bareos) and storage servers on different systems. So, when the Bareos repository has been configured, install the packages on the backup.example.org server:

yum install bareos-director bareos-database-sqlite3 \\\n                bareos-storage-glusterfs bareos-filedaemon \\\n                bareos-bconsole\n

To keep things as simple as possible, SQLite is used. For production deployments, either MySQL or PostgreSQL is advised. The initial database needs to be created:

sqlite3 /var/lib/bareos/bareos.db < /usr/lib/bareos/scripts/ddl/creates/sqlite3.sql\nchown bareos:bareos /var/lib/bareos/bareos.db\n

The bareos-bconsole package is optional. bconsole is a terminal application that can be used to initiate backups, check the status of different Bareos components and the like. Testing the configuration with bconsole is relatively simple.

Once the packages are installed, you will need to start and enable the daemons:

systemctl start bareos-sd\nsystemctl start bareos-fd\nsystemctl start bareos-dir\nsystemctl enable bareos-sd\nsystemctl enable bareos-fd\nsystemctl enable bareos-dir\n
"},{"location":"Administrator-Guide/Bareos/#gluster-volume-preparation","title":"Gluster Volume preparation","text":"

There are a few steps needed to allow Bareos to access the Gluster Volume. By default Gluster does not allow clients to connect from an unprivileged port. Because the Bareos Storage Daemon does not run as root, permissions to connect need to be opened up.

There are two processes involved when a client accesses a Gluster Volume. For the initial phase, GlusterD is contacted; when the client has received the layout of the volume, the client will connect to the bricks directly. The changes to allow unprivileged processes to connect are therefore twofold:

  1. In /etc/glusterfs/glusterd.vol the option rpc-auth-allow-insecure on needs to be added on all storage servers. After the modification of the configuration file, the GlusterD process needs to be restarted with systemctl restart glusterd.
  2. The brick processes for the volume are configured through a volume option. Executing gluster volume set backups server.allow-insecure on sets the needed option. Some versions of Gluster require a volume stop/start before the option is taken into account; for these versions you will need to execute gluster volume stop backups and gluster volume start backups. Both changes are summarised in the sketch below.
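
The following sketch shows where the option line goes in /etc/glusterfs/glusterd.vol; the volume management block is abbreviated here and only the added option line matters:

volume management\n    type mgmt/glusterd\n    ...\n    option rpc-auth-allow-insecure on\nend-volume\n

followed by:

systemctl restart glusterd\ngluster volume set backups server.allow-insecure on\n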

Except for the network permissions, the Bareos Storage Daemon needs to be allowed to write to the filesystem provided by the Gluster Volume. This is achieved by setting normal UNIX permissions/ownership so that the right user/group can write to the volume:

mount -t glusterfs storage.example.org:/backups /mnt\nmkdir /mnt/bareos\nchown bareos:bareos /mnt/bareos\nchmod ug=rwx /mnt/bareos\numount /mnt\n

Depending on how users/groups are maintained in the environment, the bareos user and group may not be available on the storage servers. If that is the case, the chown command above can be adapted to use the uid and gid of the bareos user and group from backup.example.org. On the Bareos server, the output would look similar to:

# id bareos\nuid=998(bareos) gid=997(bareos) groups=997(bareos),6(disk),30(tape)\n

And that makes the chown command look like this:

chown 998:997 /mnt/bareos\n
"},{"location":"Administrator-Guide/Bareos/#bareos-configuration","title":"Bareos Configuration","text":"

When bareos-storage-glusterfs got installed, an example configuration file has been added too. The /etc/bareos/bareos-sd.d/device-gluster.conf contains the Archive Device directive, which is a URL for the Gluster Volume and path where the backups should get stored. In our example, the entry should get set to:

Device {\n    Name = GlusterStorage\n    Archive Device = gluster://storage.example.org/backups/bareos\n    Device Type = gfapi\n    Media Type = GlusterFile\n    ...\n}\n

The default configuration of the Bareos provided jobs is to write backups to /var/lib/bareos/storage. In order to write all the backups to the Gluster Volume instead, the configuration for the Bareos Director needs to be modified. In the /etc/bareos/bareos-dir.conf configuration, the defaults for all jobs can be changed to use the GlusterFile storage:

JobDefs {\n    Name = \"DefaultJob\"\n    ...\n  #   Storage = File\n    Storage = GlusterFile\n    ...\n}\n

After changing the configuration files, the Bareos daemons need to apply them. The easiest way to inform the processes of the changed configuration files is by instructing them to reload their configuration:

# bconsole\nConnecting to Director backup:9101\n1000 OK: backup-dir Version: 14.2.2 (12 December 2014)\nEnter a period to cancel a command.\n*reload\n

With bconsole it is also possible to check if the configuration has been applied. The status command can be used to show the URL of the storage that is configured. When all is setup correctly, the result looks like this:

*status storage=GlusterFile\nConnecting to Storage daemon GlusterFile at backup:9103\n...\nopen.\n...\n
"},{"location":"Administrator-Guide/Bareos/#create-your-first-backup","title":"Create your first backup","text":"

There are several default jobs configured in the Bareos Director. One of them is the DefaultJob which was modified in an earlier step. This job uses the SelfTest FileSet, which backs up /usr/sbin. Running this job will verify if the configuration is working correctly. Additional jobs, other FileSets and more File Daemons (clients that get backed up) can be added later.

*run\nA job name must be specified.\nThe defined Job resources are:\n        1: BackupClient1\n        2: BackupCatalog\n        3: RestoreFiles\nSelect Job resource (1-3): 1\nRun Backup job\nJobName:  BackupClient1\nLevel:    Incremental\nClient:   backup-fd\n...\nOK to run? (yes/mod/no): yes\nJob queued. JobId=1\n

The job will need a few seconds to complete, the status command can be used to show the progress. Once done, the messages command will display the result:

*messages\n...\n    JobId:                  1\n    Job:                    BackupClient1.2015-09-30_21.17.56_12\n    ...\n    Termination:            Backup OK\n

The archive that contains the backup will be located on the Gluster Volume. To check if the file is available, mount the volume on a storage server:

mount -t glusterfs storage.example.org:/backups /mnt\nls /mnt/bareos\n
"},{"location":"Administrator-Guide/Bareos/#further-reading","title":"Further Reading","text":"

This document intends to provide a quick start of configuring Bareos to use Gluster as a storage backend. Bareos can be configured to create backups of different clients (which run a File Daemon), run jobs at scheduled time and intervals and much more. The excellent Bareos documentation can be consulted to find out how to create backups in a much more useful way than can get expressed on this page.

"},{"location":"Administrator-Guide/Brick-Naming-Conventions/","title":"Brick Naming Conventions","text":"

FHS-2.3 isn't entirely clear on where data shared by the server should reside. It does state that \"/srv contains site-specific data which is served by this system\", but is GlusterFS data site-specific?

The consensus seems to lean toward using /data. A good hierarchical method for placing bricks is:

/data/glusterfs/<volume>/<brick>/brick\n

In this example, <brick> is the filesystem that is mounted.

"},{"location":"Administrator-Guide/Brick-Naming-Conventions/#example-one-brick-per-server","title":"Example: One Brick Per Server","text":"

A physical disk /dev/sdb is going to be used as brick storage for a volume you're about to create named myvol1. You've partitioned and formatted /dev/sdb1 with XFS on each of 4 servers.

On all 4 servers:

mkdir -p /data/glusterfs/myvol1/brick1\nmount /dev/sdb1 /data/glusterfs/myvol1/brick1\n

We're going to define the actual brick in the brick directory on that filesystem. This helps by causing the brick to fail to start if the XFS filesystem isn't mounted.

On just one server:

gluster volume create myvol1 replica 2 server{1..4}:/data/glusterfs/myvol1/brick1/brick\n

This will create the volume myvol1 which uses the directory /data/glusterfs/myvol1/brick1/brick on all 4 servers.

"},{"location":"Administrator-Guide/Brick-Naming-Conventions/#example-two-bricks-per-server","title":"Example: Two Bricks Per Server","text":"

Two physical disks /dev/sdb and /dev/sdc are going to be used as brick storage for a volume you're about to create named myvol2. You've partitioned and formatted /dev/sdb1 and /dev/sdc1 with XFS on each of 4 servers.

On all 4 servers:

mkdir -p /data/glusterfs/myvol2/brick{1,2}\nmount /dev/sdb1 /data/glusterfs/myvol2/brick1\nmount /dev/sdc1 /data/glusterfs/myvol2/brick2\n

Again we're going to define the actual brick in the brick directory on these filesystems.

On just one server:

gluster volume create myvol2 replica 2 \\\n  server{1..4}:/data/glusterfs/myvol2/brick1/brick \\\n  server{1..4}:/data/glusterfs/myvol2/brick2/brick\n

Note: It might be tempting to try gluster volume create myvol2 replica 2 server{1..4}:/data/glusterfs/myvol2/brick{1,2}/brick but Bash would expand the last {} first, so you would end up replicating between the two bricks on each server, instead of across servers.

"},{"location":"Administrator-Guide/Building-QEMU-With-gfapi-For-Debian-Based-Systems/","title":"Building QEMU With gfapi For Debian Based Systems","text":"

This how-to has been tested on Ubuntu 13.10 in a clean, up-to-date environment. Older Ubuntu distros required some hacks, if I remember rightly. Other Debian based distros should be able to follow this, adjusting for dependencies. Please update this if you get it working on another distro.

"},{"location":"Administrator-Guide/Building-QEMU-With-gfapi-For-Debian-Based-Systems/#satisfying-dependencies","title":"Satisfying dependencies","text":"

Make the first stab at getting qemu dependencies

apt-get  build-dep qemu\n

This next command grabs all the dependencies specified in the debian control file as asked for from upstream Debian sid. You can look into the options specified there and adjust to taste.

# get almost all the rest and the tools to work up the Debian magic\napt-get install devscripts quilt libiscsi-dev libusbredirparser-dev libssh2-1-dev libvdeplug-dev libjpeg-dev glusterfs*\n

We need a newer version of libseccomp for Ubuntu 13.10:

mkdir libseccomp\ncd libseccomp\n\n# grab it from upstream sid\nwget http://ftp.de.debian.org/debian/pool/main/libs/libseccomp/libseccomp_2.1.0+dfsg.orig.tar.gz\nwget http://ftp.de.debian.org/debian/pool/main/libs/libseccomp/libseccomp_2.1.0+dfsg-1.debian.tar.gz\n\n# get it ready\ntar xf libseccomp_2.1.0+dfsg.orig.tar.gz\ncd libseccomp-2.1.0+dfsg/\n\n# install the debian magic\ntar xf ../libseccomp_2.1.0+dfsg-1.debian.tar.gz\n\n# apply series files if any\nwhile quilt push; do quilt refresh; done\n\n# build debs, they will appear one directory up\ndebuild -i -us -uc -b\ncd ..\n\n# install it\ndpkg -i *.deb\n
"},{"location":"Administrator-Guide/Building-QEMU-With-gfapi-For-Debian-Based-Systems/#building-qemu","title":"Building QEMU","text":"

This next part is straightforward if your dependencies are met. For the advanced reader look around debian/control once it is extracted before you install as you may want to change what options QEMU is built with and what targets are requested.

cd ..\nmkdir qemu\ncd qemu\n\n# download our sources. you'll want to check back frequently on these for changes\nwget http://ftp.de.debian.org/debian/pool/main/q/qemu/qemu_1.7.0+dfsg.orig.tar.xz\nwget http://ftp.de.debian.org/debian/pool/main/q/qemu/qemu_1.7.0+dfsg-2.debian.tar.gz\nwget http://download.gluster.org/pub/gluster/glusterfs/3.4/LATEST/glusterfs-3.4.2.tar.gz\ntar xf glusterfs-3.4.2.tar.gz\ntar xf qemu_1.7.0+dfsg.orig.tar.xz\ncd qemu-1.7.0+dfsg/\n\n# unpack the debian magic\ntar xf ../qemu_1.7.0+dfsg-2.debian.tar.gz\n\n# bring glusterfs into the build\ncp -r ../glusterfs-3.4.2 glusterfs\n\n# the glusterfs check in configure looks around weird. I've never asked why but moving the src stuff up one works and tests fine\ncd glusterfs/api/\nmv src/* .\ncd ../..\n\n# you'll need to edit debian/control to enable glusterfs replacing\n\n  - ##--enable-glusterfs todo\n  + # --enable-glusterfs\n  + glusterfs-common (>= 3.4.0),\n\n# And finally build. It'll take ages.  http://xkcd.com/303/\n# apply series if any\nwhile quilt push; do quilt refresh; done\n\n# build packages\ndebuild -i -us -uc -b\ncd ..\n

Your debs are now available to install. It is up to the reader to determine what targets they want installed.

"},{"location":"Administrator-Guide/Consul/","title":"Consul and GlusterFS integration","text":"

Consul is used for service discovery and configuration.

It consists of a consul server and agents connecting to it. Apps can get configuration data from consul via the HTTP API or DNS queries.

Long story short, instead of using standard hostnames and relying on official DNS servers which we may not control, we can use consul to resolve hosts with services under .consul domain, which turns this classic setup:

mount -t glusterfs -o backupvolfile-server=gluster-poc-02 gluster-poc-01:/g0 /mnt/gluster/g0\n

into more convenient entry:

mount -t glusterfs gluster.service.consul:/g0 /mnt/gluster/g0\n

which is especially useful when using image-based servers without further provisioning, and spreading load across all healthy servers registered in consul.

"},{"location":"Administrator-Guide/Consul/#warning","title":"Warning","text":"

In this document you will get a basic proof-of-concept setup - gluster servers and gluster clients configured - which should be a starting point to expand on. You should read the Further steps section to harden it.

Tested on:

  • isolated virtual network
  • selinux permissive (yay!)
  • consul server/agents version v0.7.5
  • gluster servers with glusterfs 3.8.x on CentOS 7.3 + samba 4 with simple auth and vfs gluster module
  • gluster volume set as distributed-replicated + 'features.shard: true' and 'features.shard-block-size: 512MB'
  • gluster agents with glusterfs 3.8.x on Ubuntu 14.04
  • gluster agents with glusterfs 3.8.x on CentOS 7.3
  • gluster agents with glusterfs 3.7.x on CentOS 5.9
  • Windows 2012R2 connected to gluster servers via samba
"},{"location":"Administrator-Guide/Consul/#scenario","title":"Scenario","text":"

We want to create shared storage accessible via different operating systems - Linux and Windows.

  • we do not control the DNS server, so we cannot add/remove entries when gluster servers are added or removed
  • gluster servers are in the gluster pool and have gluster volume created named g0
  • gluster servers have consul agent installed, and they will register to consul as gluster service
  • gluster servers have also SMB installed with very simple setup using gluster vfs plugin
  • gluster clients have the consul agent installed, and they will use gluster.service.consul as the entry point.
  • DNS resolution under Linux will be handled via dnsmasq
  • DNS resolution under Windows will be handled via consul itself
"},{"location":"Administrator-Guide/Consul/#known-limitations","title":"Known limitations","text":"
  • consul health checks introduce a delay; also remember that consul can cache DNS entries to increase performance
  • the way a Windows share works is that it connects to one of the samba servers; if this server dies, transfers are aborted and we must retry the operation, but watch out for the delay.
  • anything other than a distributed-replicated gluster volume was not tested - it may not work for Windows.
"},{"location":"Administrator-Guide/Consul/#requirements","title":"Requirements","text":"
  • you should have a consul server (or cluster) up and running, ideally also accessible via the default HTTP port.
  • you should have gluster servers already joined in the gluster pool, bricks and volume configured.
  • check your firewall rules for outbound and inbound traffic for DNS, gluster, samba and consul
  • make yourself familiar with consul documentation (or specific branch on github)
"},{"location":"Administrator-Guide/Consul/#linux-setup","title":"Linux setup","text":""},{"location":"Administrator-Guide/Consul/#consul-agent-on-linux-on-gluster-clients","title":"Consul agent on Linux on gluster clients","text":"

First, install the consul agent. The best way is to use, for example, a puppet module. In general your Linux boxes should register with the consul server and be visible under the Nodes section.

To verify that the consul agent is working properly, you can query its DNS interface, asking it to list the consul servers:

\n[centos@gluster-poc-01]# dig @127.0.0.1 -p 8600 consul.service.consul\n\n; <<>> DiG 9.9.4-RedHat-9.9.4-38.el7_3.3 <<>> @127.0.0.1 -p 8600 consul.service.consul\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 39354\n;; flags: qr aa rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 0\n\n;; QUESTION SECTION:\n;consul.service.consul.     IN  A\n\n;; ANSWER SECTION:\nconsul.service.consul.  0   IN  A   172.30.64.198\nconsul.service.consul.  0   IN  A   172.30.82.255\nconsul.service.consul.  0   IN  A   172.30.81.155\n\n;; Query time: 1 msec\n;; SERVER: 127.0.0.1#8600(127.0.0.1)\n;; WHEN: Sat May 20 08:50:21 UTC 2017\n;; MSG SIZE  rcvd: 87\n

Now, to be able to use it at the system level, we want it to work without specifying a port. We can achieve this by running consul on port 53 (not advised), redirecting network traffic from port 53 to 8600, or proxying it via a local DNS resolver - for example a locally installed dnsmasq.

First, install dnsmasq, and add the file /etc/dnsmasq.d/10-consul:

server=/consul/127.0.0.1#8600\n

This will ensure that any *.consul requests are forwarded to the local consul agent listening on its default DNS port 8600.

Make sure that /etc/resolv.conf contains nameserver 127.0.0.1. Under Debian distros it should already be there; under RedHat, not necessarily. You can fix this in one of two ways, choose on your own which one to apply:

  • add nameserver 127.0.0.1 to /etc/resolvconf/resolv.conf.d/header

or

  • update /etc/dhcp/dhclient.conf and add to it line prepend domain-name-servers 127.0.0.1;.

In both cases this ensures that dnsmasq will be the first nameserver; it requires reloading the resolver or networking.

Eventually you should have nameserver 127.0.0.1 set as the first entry in /etc/resolv.conf and have DNS resolving consul entries:

\n[centos@gluster-poc-01]# dig consul.service.consul\n\n; <<>> DiG 9.9.4-RedHat-9.9.4-38.el7_3.3 <<>> consul.service.consul\n;; global options: +cmd\n;; Got answer:\n;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 42571\n;; flags: qr aa rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 0\n\n;; QUESTION SECTION:\n;consul.service.consul.     IN  A\n\n;; ANSWER SECTION:\nconsul.service.consul.  0   IN  A   172.30.64.198\nconsul.service.consul.  0   IN  A   172.30.82.255\nconsul.service.consul.  0   IN  A   172.30.81.155\n\n;; Query time: 1 msec\n;; SERVER: 127.0.0.1#53(127.0.0.1)\n;; WHEN: Sat May 20 09:01:12 UTC 2017\n;; MSG SIZE  rcvd: 87\n\n

From now on we should be able to use <servicename>.service.consul in places where we previously had FQDN entries of single servers.

Next, we must define the gluster service in consul on the servers.

"},{"location":"Administrator-Guide/Consul/#consul-agent-on-linux-on-gluster-servers","title":"Consul agent on Linux on gluster servers","text":"

Install consul agent as described in previous section.

You can define a consul service named gluster that runs health checks; to do that we must add consul to sudoers or allow it to execute certain sudo commands without a password:

/etc/sudoers.d/99-consul.conf:

consul ALL=(ALL) NOPASSWD: /sbin/gluster pool list\n

First, let's define the service in consul. It will be very basic, without volume names: service name gluster, with the default port 24007, and we will tag it as gluster and server.

Our service will have service health checks every 10s:

  • check if the gluster service is responding to TCP on 24007 port
  • check if the gluster server is connected to other peers in the pool (to avoid registering a service as healthy when it is actually not serving anything)

Below is an example of /etc/consul/service_gluster.json:

{\n  \"service\": {\n    \"address\": \"\",\n    \"checks\": [\n      {\n        \"interval\": \"10s\",\n        \"tcp\": \"localhost:24007\",\n        \"timeout\": \"5s\"\n      },\n      {\n        \"interval\": \"10s\",\n        \"script\": \"/bin/bash -c \\\"sudo -n /sbin/gluster pool list |grep -v UUID|grep -v localhost|grep Connected\\\"\",\n        \"timeout\": \"5s\"\n      }\n    ],\n    \"enableTagOverride\": false,\n    \"id\": \"gluster\",\n    \"name\": \"gluster\",\n    \"port\": 24007,\n    \"tags\": [\"gluster\", \"server\"]\n  }\n}\n

Restart the consul service and you should see the gluster servers in the consul web UI. After a while the service should be in a healthy state and be resolvable via nslookup:

[centos@gluster-poc-02]# nslookup gluster.service.consul\nServer:     127.0.0.1\nAddress:    127.0.0.1#53\n\nName:   gluster.service.consul\nAddress: 172.30.64.144\nName:   gluster.service.consul\nAddress: 172.30.65.61\n

Notice that a gluster server can also be a gluster client, for example if we want to mount the gluster volume on the servers themselves.

"},{"location":"Administrator-Guide/Consul/#mounting-gluster-volume-under-linux","title":"Mounting gluster volume under Linux","text":"

As a mountpoint we would usually select one of the gluster servers, and another as a backup server, like this:

mount -t glusterfs -o backupvolfile-server=gluster-poc-02 gluster-poc-01:/g0 /mnt/gluster/g0\n

This is a bit inconvenient: for example, we may have an image with hardcoded hostnames, and the old servers are gone due to maintenance. We would have to recreate the image, or reconfigure the existing nodes if they unmount the gluster storage.

To mitigate that issue we can now use consul for fetching the server pool:

mount -t glusterfs gluster.service.consul:/g0 /mnt/gluster/g0\n

So we can put that into /etc/fstab or one of the autofs files.
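
For example, a minimal /etc/fstab entry could look like the line below (the mount options are illustrative; adjust them to your environment):

# mount the volume via the consul service name at boot\ngluster.service.consul:/g0  /mnt/gluster/g0  glusterfs  defaults,_netdev  0 0\n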

"},{"location":"Administrator-Guide/Consul/#windows-setup","title":"Windows setup","text":""},{"location":"Administrator-Guide/Consul/#configuring-gluster-servers-as-samba-shares","title":"Configuring gluster servers as samba shares","text":"

This is the simplest and not a very secure setup; you have been warned.

A proper setup would use LDAP or CTDB. You can configure it with puppet using the kakwa-samba module.

First, we want to reconfigure the gluster servers so that they serve as samba shares using user/password credentials, which are separate from the Linux credentials.

We assume that the windows share will be accessed as user steve with password steve-loves-bacon; make sure you create that user on each gluster server host.

sudo adduser steve\nsudo smbpasswd -a steve\n

Notice that if you do not set user.smb = disable on the gluster volume then it may auto-add itself to the samba configuration. So it is better to disable this by executing:

gluster volume set g0 user.smb disable\n

Now install samba and samba-vfs-glusterfs packages and configure /etc/samba/smb.conf:

[global]\nworkgroup = test\nsecurity = user\nmin protocol = SMB2\nnetbios name = gluster\nrealm = test\nvfs objects = acl_xattr\nmap acl inherit = Yes\nstore dos attributes = Yes\nlog level = 1\ndedicated keytab file = /etc/krb5.keytab\nmap untrusted to domain = Yes\n\n[vfs-g0]\nguest only = no\nwritable = yes\nguest ok = no\nforce user = steve\ncreate mask = 0666\ndirectory mask = 0777\ncomment = Gluster via VFS (native gluster)\npath = /\nvfs objects = glusterfs\nglusterfs:volume = g0\nkernel share modes = no\nglusterfs:loglevel = 7\nglusterfs:logfile = /var/log/samba/glusterfs-g0.%M.log\nbrowsable = yes\nforce group = steve\n

Some notes:

  • when using the vfs plugin, path is a path relative to the gluster volume root.
  • kernel share modes = no may be required to make it work.

We can also use a classic fuse mount and point the samba share path at it; then the configuration is even simpler.
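
A minimal sketch of that alternative, assuming we mount the volume locally over fuse and point the samba share at the mountpoint instead of using the vfs plugin:

# mount the volume locally via fuse\nmkdir -p /mnt/gluster/g0\nmount -t glusterfs localhost:/g0 /mnt/gluster/g0\n# in smb.conf the share would then use \"path = /mnt/gluster/g0\" and drop the glusterfs vfs objects\n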

For a detailed comparison between those two solutions see the gluster vfs blog posts.

  • Remember to add user steve to samba with a password
  • unblock firewall ports for samba
  • test samba config and reload samba
"},{"location":"Administrator-Guide/Consul/#defining-new-samba-service-under-consul","title":"Defining new samba service under consul","text":"

Now we define the gluster-samba service on the gluster server hosts in a similar way to how we defined it for gluster itself.

Below is an example of /etc/consul/service_samba.json:

{\n  \"service\": {\n    \"address\": \"\",\n    \"checks\": [\n      {\n        \"interval\": \"10s\",\n        \"tcp\": \"localhost:139\",\n        \"timeout\": \"5s\"\n      },\n      {\n        \"interval\": \"10s\",\n        \"tcp\": \"localhost:445\",\n        \"timeout\": \"5s\"\n      }\n    ],\n    \"enableTagOverride\": false,\n    \"id\": \"gluster-samba\",\n    \"name\": \"gluster-samba\",\n    \"port\": 139,\n    \"tags\": [\"gluster\", \"samba\"]\n  }\n}\n

We have two health checks here, just checking if we can connect to the samba service. It could also be expanded to check whether the network share is actually accessible, as sketched below.
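
As a sketch (reusing the steve credentials from earlier), such a script check could simply try to list the share and fail if it is not accessible:

# candidate body for an additional consul script check; exits non-zero if the share cannot be listed\nsmbclient //localhost/vfs-g0 -U steve%steve-loves-bacon -c 'ls' > /dev/null\n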

Reload the consul service and after a while you should see the new service registered in consul. Check if it exists in DNS:

nslookup gluster-samba.service.consul\n\nServer:     127.0.0.1\nAddress:    127.0.0.1#53\n\nName:   gluster-samba.service.consul\nAddress: 172.30.65.61\nName:   gluster-samba.service.consul\nAddress: 172.30.64.144\n

Install samba-client and check connectivity to samba from gluster server itself.

[centos@gluster-poc-01]# smbclient -L //gluster-samba.service.consul/g0 -U steve\nEnter steve's password:\nDomain=[test] OS=[Windows 6.1] Server=[Samba 4.4.4]\n\n    Sharename       Type      Comment\n    ---------       ----      -------\n    vfs-g0          Disk      Gluster via VFS (native gluster)\n    IPC$            IPC       IPC Service (Samba 4.4.4)\nDomain=[test] OS=[Windows 6.1] Server=[Samba 4.4.4]\n\n    Server               Comment\n    ---------            -------\n\n    Workgroup            Master\n    ---------            -------\n

Now check if we can list share directory as steve:

smbclient //gluster-samba.service.consul/vfs-g0/ -U steve -c ls\n\nEnter steve's password:\nDomain=[test] OS=[Windows 6.1] Server=[Samba 4.4.4]\n  .                                   D        0  Wed May 17 20:48:06 2017\n  ..                                  D        0  Wed May 17 20:48:06 2017\n  .trashcan                          DH        0  Mon May 15 15:41:37 2017\n  CentOS-7-x86_64-Everything-1611.iso      N 8280604672  Mon Dec  5 13:57:33 2016\n  hello.world                         D        0  Fri May 19 08:54:02 2017\n  ipconfig.all.txt                    A     2931  Wed May 17 20:18:52 2017\n  nslookup.txt                        A      126  Wed May 17 20:19:13 2017\n  net.view.txt                        A      315  Wed May 17 20:47:44 2017\n\n        463639360 blocks of size 1024. 447352464 blocks available\n\n

Notice that this might take a few seconds, because when we try to connect to the share, samba vfs connects to the gluster servers as an agent.

Looks good, time to configure Windows.

"},{"location":"Administrator-Guide/Consul/#installing-consul-agent-on-windows","title":"Installing Consul agent on Windows","text":"

Log in as administrator and install the consul agent on the Windows machine; the easiest way is to use chocolatey.

  • install chocolatey using your preferred installation method, for example via cmd.exe

  • optionally install some tools via chocolatey to edit files:

chocolatey install notepadplusplus\n
  • install consul as agent with specific version and configs to load:
chocolatey install consul --version 0.7.5 -params '-config-dir \"%PROGRAMDATA%\\consul\\\"'\n
  • stop consul service in command prompt:
net stop consul\n
  • edit consul config %PROGRAMDATA%\\consul\\config.json:
start notepad++.exe \"%PROGRAMDATA%\\consul\\config\\config.json\"\n

fill it with data (description below):

{\n  \"datacenter\": \"virt-gluster\",\n  \"retry_join\": [\"192.178.1.11\", \"192.178.1.12\", \"192.178.1.13\"],\n  \"recursors\": [\"8.8.8.8\", \"8.8.4.4\"],\n  \"ports\": {\n    \"dns\": 53\n  }\n}\n

Remember to replace datacenter with your datacenter name, recursors with your preferred local DNS servers, and retry_join with a list of consul server hosts or, for example, some generic Route53 entry from a private zone (if it exists) which points to the real consul servers.

In AWS you can also use retry_join_ec2 - this way the Windows instance will always search for other consul server EC2 instances and join them.

Notice that the recursors section is required if you are not using retry_join and rely only on AWS EC2 tags - otherwise consul will fail to resolve anything else, and thus will not join the consul cluster.

We use port 53 so that consul will serve as local DNS.

  • start consul service
net start consul\n
  • update DNS settings for network interface in Windows, make it the primary entry
netsh interface ipv4 add dnsserver \\\"Ethernet\\\" address=127.0.0.1 index=1\n
  • verify that DNS Servers is pointing to localhost:
ipconfig /all\n\nWindows IP Configuration\n\n    Host Name . . . . . . . . . . . . : WIN-S8N782O8GG3\n    ...\n    ...\n    DNS Servers . . . . . . . . . . . : 127.0.0.1\n    ...\n    ...\n
  • verify that consul resolves some services:
nslookup gluster.service.consul\n\nnslookup gluster-samba.service.consul\n\nServer:  UnKnown\nAddress:  127.0.0.1\n\nName:    gluster-samba.service.consul\nAddresses:  172.30.65.61\n            172.30.64.144\n
"},{"location":"Administrator-Guide/Consul/#mounting-gluster-volume-under-windows","title":"Mounting gluster volume under Windows","text":"

We have gluster servers running with a volume and a samba share, registered in consul. We have a Windows machine with a running consul agent. All hosts are registered in consul and can connect to each other.

  • verify that samba can see network share:
net view \\\\gluster-samba.service.consul\n\nShared resources at \\\\gluster-samba.service.consul\n\nSamba 4.4.4\n\nShare name  Type  Used as  Comment\n\n-------------------------------------------------------------------------------\nvfs-g0      Disk           Gluster via VFS (native gluster)\nThe command completed successfully.\n
  • mount network share, providing credentials for gluster samba share:
net use s: \\\\gluster-samba.service.consul\\vfs-g0 steve-loves-bacon /user:steve /persistent:yes\n

If mounting fails with the error message System error 1219 has occurred. Multiple connections to a server or shared resource by the same user, using more than one user name, are not allowed..., then you must delete the existing connections, for example:

net use /delete \\\\gluster-samba.service.consul\\IPC$\n

And then retry the net use commands again.

From now on this windows share should reconnect to a random healthy gluster samba server.

Enjoy.

"},{"location":"Administrator-Guide/Consul/#further-steps-for-improvements","title":"Further steps for improvements","text":"

Below is a list of things to improve:

  • enable selinux
  • harden samba setup on gluster servers to use domain logons
  • use consul ACL lists to control access to consul data

  • export gluster volumes as key/value entries in consul, and use consul-template to create mountpoints on consul updates - for autofs / samba mounts and unmounts

  • expand the consul health checks with more detailed checks (see the sketch below), like:
  • better checking whether the gluster volume exists, is started, etc.
  • whether the samba share is accessible by the client (to avoid a situation where samba tries to share a non-mounted volume)
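
As a rough sketch of such a check (assuming the consul sudoers entry shown earlier is extended to also allow gluster volume info), a script check could verify that the volume is actually started:

# example script check body; requires a matching sudoers rule for \"gluster volume info\"\nsudo -n /sbin/gluster volume info g0 | grep -q 'Status: Started'\n
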
"},{"location":"Administrator-Guide/Directory-Quota/","title":"Managing Directory Quota","text":"

Directory quotas in GlusterFS allow you to set limits on disk space usage by directories or volumes. Storage administrators can control disk space utilization at the directory and/or volume level in GlusterFS by setting limits on allocatable disk space at any level in the volume and directory hierarchy. This is particularly useful in cloud deployments to facilitate the utility billing model.

Note: For now, only Hard limits are supported. Here, the limit cannot be exceeded, and attempts to use more disk space or inodes beyond the set limit are denied.

System administrators can also monitor the resource utilization to limit the storage for the users depending on their role in the organization.

You can set the quota at the following levels:

  • Directory level \u2013 limits the usage at the directory level
  • Volume level \u2013 limits the usage at the volume level

Note: You can set the quota limit on an empty directory. The quota limit will be automatically enforced when files are added to the directory.

"},{"location":"Administrator-Guide/Directory-Quota/#enabling-quota","title":"Enabling Quota","text":"

You must enable Quota to set disk limits.

To enable quota:

Use the following command to enable quota:

gluster volume quota <VOLNAME> enable\n

For example, to enable quota on the test-volume:

# gluster volume quota test-volume enable\nQuota is enabled on /test-volume\n
"},{"location":"Administrator-Guide/Directory-Quota/#disabling-quota","title":"Disabling Quota","text":"

You can disable Quota if needed.

To disable quota:

Use the following command to disable quota:

gluster volume quota <VOLNAME> disable\n

For example, to disable quota translator on the test-volume:

# gluster volume quota test-volume disable\nQuota translator is disabled on /test-volume\n
"},{"location":"Administrator-Guide/Directory-Quota/#setting-or-replacing-disk-limit","title":"Setting or Replacing Disk Limit","text":"

You can create new directories in your storage environment and set a disk limit, or set a disk limit for existing directories. The directory name should be relative to the volume, with the export directory/mount being treated as \"/\".

To set or replace disk limit:

Set the disk limit using the following command:

gluster volume quota <VOLNAME> limit-usage <DIR> <HARD_LIMIT>\n

For example, to set a limit on data directory on the test-volume where data is a directory under the export directory:

# gluster volume quota test-volume limit-usage /data 10GB\nUsage limit has been set on /data\n

Note: In a multi-level directory hierarchy, the strictest disk limit will be considered for enforcement. Also, whenever the quota limit is set for the first time, an auxiliary mount point will be created under /var/run/gluster/. This is just like any other mount point with some special permissions, and it remains until quota is disabled. This mount point is used by quota to set and display limits and lists."},{"location":"Administrator-Guide/Directory-Quota/#displaying-disk-limit-information","title":"Displaying Disk Limit Information","text":"

You can display disk limit information on all the directories on which the limit is set.

To display disk limit information:

  • Display disk limit information of all the directories on which limit is set, using the following command:

    gluster volume quota <VOLNAME> list

    For example, to see the set disks limit on the test-volume:

      # gluster volume quota test-volume list\n  /Test/data    10 GB       6 GB\n  /Test/data1   10 GB       4 GB\n
    • Display disk limit information on a particular directory on which limit is set, using the following command:

      gluster volume quota <VOLNAME> list <DIR>

      For example, to view the set limit on /data directory of test-volume:

        # gluster volume quota test-volume list /data\n  /Test/data    10 GB       6 GB\n
      "},{"location":"Administrator-Guide/Directory-Quota/#displaying-quota-limit-information-using-the-df-utility","title":"Displaying Quota Limit Information Using the df Utility","text":"

      You can create a report of disk usage with the df utility that takes quota limits into account. To generate such a report, run the following command:

      gluster volume set <VOLNAME> quota-deem-statfs on\n

      In this case, the total disk space of the directory is taken as the quota hard limit set on the directory of the volume.

      Note The default value for quota-deem-statfs is on when the quota is enabled and it is recommended to keep quota-deem-statfs on.

      The following example displays the disk usage when quota-deem-statfs is off:

      # gluster volume set test-volume features.quota-deem-statfs off\nvolume set: success\n\n# gluster volume quota test-volume list\nPath            Hard-limit    Soft-limit    Used      Available\n---------------------------------------------------------------\n/               300.0GB        90%          11.5GB    288.5GB\n/John/Downloads  77.0GB        75%          11.5GB     65.5GB\n

      Disk usage for volume test-volume as seen on client1:

      # df -hT /home\nFilesystem           Type            Size  Used Avail Use% Mounted on\nserver1:/test-volume fuse.glusterfs  400G   12G  389G   3% /home\n

      The following example displays the disk usage when quota-deem-statfs is on:

      # gluster volume set test-volume features.quota-deem-statfs on\nvolume set: success\n\n# gluster vol quota test-volume list\nPath        Hard-limit    Soft-limit     Used     Available\n-----------------------------------------------------------\n/              300.0GB        90%        11.5GB     288.5GB\n/John/Downloads 77.0GB        75%        11.5GB     65.5GB\n

      Disk usage for volume test-volume as seen on client1:

      # df -hT /home\nFilesystem            Type            Size  Used Avail Use% Mounted on\nserver1:/test-volume  fuse.glusterfs  300G   12G  289G   4% /home\n

      When the quota-deem-statfs option is set to on, the user sees the hard limit set on a directory as the total disk space available on it.

      "},{"location":"Administrator-Guide/Directory-Quota/#updating-memory-cache-size","title":"Updating Memory Cache Size","text":""},{"location":"Administrator-Guide/Directory-Quota/#setting-timeout","title":"Setting Timeout","text":"

      For performance reasons, quota caches the directory sizes on the client. You can set a timeout indicating the maximum valid duration of directory sizes in the cache, from the time they are populated.

      For example: If multiple clients are writing to a single directory, there are chances that some other client might write till the quota limit is exceeded. However, this new file-size may not get reflected in the client till the size entry in the cache has become stale because of timeout. If writes happen on this client during this duration, they are allowed even though they would lead to exceeding of quota-limits, since the size in the cache is not in sync with the actual size. When a timeout happens, the size in the cache is updated from servers and will be in sync and no further writes will be allowed. A timeout of zero will force fetching of directory sizes from the server for every operation that modifies file data and will effectively disable directory size caching on the client-side.

      To update the memory cache size:

      Use the following command to update the memory cache size:

      1. Soft Timeout: The frequency at which the quota server-side translator checks the volume usage when the usage is below the soft limit. The soft timeout is in effect when the disk usage is less than the soft limit.
      gluster volume set <VOLNAME> features.soft-timeout <time>\n
      2. Hard Timeout: The frequency at which the quota server-side translator checks the volume usage when the usage is above the soft limit. The hard timeout is in effect when the disk usage is between the soft limit and the hard limit.
      gluster volume set <VOLNAME> features.hard-timeout <time>\n

      For example, to set the hard timeout to 5 seconds on test-volume:

      # gluster volume set test-volume features.hard-timeout 5\nSet volume successful\n
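
      Similarly, to set the soft timeout on test-volume (the 30-second value here is only illustrative):

      gluster volume set test-volume features.soft-timeout 30\n
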
      "},{"location":"Administrator-Guide/Directory-Quota/#setting-alert-time","title":"Setting Alert Time","text":"

      Alert time is the frequency at which you want your usage information to be logged after you reach the soft limit.

      To set the alert time:

      Use the following command to set the alert time:

      gluster volume quota <VOLNAME> alert-time <time>\n

      Note: The default alert-time is one week.

      For example, to set the alert time to one day:

      # gluster volume quota test-volume alert-time 1d\nvolume quota : success\n
      "},{"location":"Administrator-Guide/Directory-Quota/#removing-disk-limit","title":"Removing Disk Limit","text":"

      You can remove the set disk limit if you do not want a quota anymore.

      To remove disk limit:

      Use the following command to remove the disk limit set on a particular directory:

      gluster volume quota <VOLNAME> remove <DIR>\n

      For example, to remove the disk limit on /data directory of test-volume:

      # gluster volume quota test-volume remove /data\nUsage limit set on /data is removed\n
      "},{"location":"Administrator-Guide/Events-APIs/","title":"Events APIs","text":"

      New in version 3.9

      NOTE: the glusterfs-selinux package has to be installed for the events feature to function properly when selinux is in enforcing mode. In addition, the default port used by eventsd has been changed to 55555 and it has to lie within the ephemeral port range.

      "},{"location":"Administrator-Guide/Events-APIs/#set-pythonpathonly-in-case-of-source-installation","title":"Set PYTHONPATH(Only in case of Source installation)","text":"

      If Gluster is installed from source, cliutils will be installed under /usr/local/lib/python2.7/site-packages. Set PYTHONPATH by adding the following to ~/.bashrc:

      export PYTHONPATH=/usr/local/lib/python2.7/site-packages:$PYTHONPATH\n
      "},{"location":"Administrator-Guide/Events-APIs/#enable-and-start-events-apis","title":"Enable and Start Events APIs","text":"

      Enable and Start glustereventsd in all peer nodes

      In Systems using Systemd,

      systemctl enable glustereventsd\nsystemctl start glustereventsd\n

      FreeBSD or others, add the following in /etc/rc.conf

      glustereventsd_enable=\"YES\"\n

      And start the glustereventsd using,

      service glustereventsd start\n

      SysVInit(CentOS 6),

      chkconfig glustereventsd on\nservice glustereventsd start\n
      "},{"location":"Administrator-Guide/Events-APIs/#status","title":"Status","text":"

      Status Can be checked using,

      gluster-eventsapi status\n

      Example output:

      Webhooks:\nNone\n\n+-----------+-------------+-----------------------+\n| NODE      | NODE STATUS | GLUSTEREVENTSD STATUS |\n+-----------+-------------+-----------------------+\n| localhost |          UP |                    UP |\n| node2     |          UP |                    UP |\n+-----------+-------------+-----------------------+\n
      "},{"location":"Administrator-Guide/Events-APIs/#webhooks","title":"Webhooks","text":"

      Webhooks are similar to callbacks (over HTTP): on an event, Gluster will call the configured Webhook URL (via POST). A Webhook is a web server which listens on a URL; it can be deployed outside of the cluster. Gluster nodes should be able to reach this Webhook server on the configured port.

      Example Webhook written in python,

      from flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route(\"/listen\", methods=[\"POST\"])\ndef events_listener():\n    gluster_event = request.json\n    if gluster_event is None:\n        # No event to process, may be test call\n        return \"OK\"\n\n    # Process gluster_event\n    # {\n    #  \"nodeid\": NODEID,\n    #  \"ts\": EVENT_TIMESTAMP,\n    #  \"event\": EVENT_TYPE,\n    #  \"message\": EVENT_DATA\n    # }\n    print (gluster_event)\n    return \"OK\"\n\napp.run(host=\"0.0.0.0\", port=9000)\n

      Test and Register webhook using following commands,

      usage: gluster-eventsapi webhook-test [-h] [--bearer_token BEARER_TOKEN] url\n\npositional arguments:\n  url                   URL of Webhook\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --bearer_token BEARER_TOKEN, -t BEARER_TOKEN\n                        Bearer Token\n

      Example(Webhook server is running in 192.168.122.188:9000),

      # gluster-eventsapi webhook-test http://192.168.122.188:9000/listen\n\n+-----------+-------------+----------------+\n| NODE      | NODE STATUS | WEBHOOK STATUS |\n+-----------+-------------+----------------+\n| localhost |          UP |             OK |\n| node2     |          UP |             OK |\n+-----------+-------------+----------------+\n

      If Webhook status is OK from all peer nodes then register the Webhook using,

      usage: gluster-eventsapi webhook-add [-h] [--bearer_token BEARER_TOKEN] url\n\npositional arguments:\n  url                   URL of Webhook\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --bearer_token BEARER_TOKEN, -t BEARER_TOKEN\n                        Bearer Token\n

      Example,

      # gluster-eventsapi webhook-add http://192.168.122.188:9000/listen\n\n+-----------+-------------+-------------+\n| NODE      | NODE STATUS | SYNC STATUS |\n+-----------+-------------+-------------+\n| localhost |          UP |          OK |\n| node2     |          UP |          OK |\n+-----------+-------------+-------------+\n

      Note: If Sync status is Not OK for any node, then make sure to run following command from a peer node when that node comes up.

      gluster-eventsapi sync\n

      To unsubscribe from events, delete the webhook using following command

      usage: gluster-eventsapi webhook-del [-h] url\n\npositional arguments:\n  url         URL of Webhook\n\noptional arguments:\n  -h, --help  show this help message and exit\n

      Example,

      gluster-eventsapi webhook-del http://192.168.122.188:9000/listen\n
      "},{"location":"Administrator-Guide/Events-APIs/#configuration","title":"Configuration","text":"

      View all configurations using,

      usage: gluster-eventsapi config-get [-h] [--name NAME]\n\noptional arguments:\n  -h, --help   show this help message and exit\n  --name NAME  Config Name\n

      Example output:

      +--------------------+-------+\n| NAME               | VALUE |\n+--------------------+-------+\n| log-level          | INFO  |\n| port               | 55555 |\n| disable-events-log | False |\n+--------------------+-------+\n

      To change any configuration,

      usage: gluster-eventsapi config-set [-h] name value\n\npositional arguments:\n  name        Config Name\n  value       Config Value\n\noptional arguments:\n  -h, --help  show this help message and exit\n
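
      For example, to change the log level (the DEBUG value here is illustrative):

      gluster-eventsapi config-set log-level DEBUG\n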

      Example output,

      +-----------+-------------+-------------+\n| NODE      | NODE STATUS | SYNC STATUS |\n+-----------+-------------+-------------+\n| localhost |          UP |          OK |\n| node2     |          UP |          OK |\n+-----------+-------------+-------------+\n

      To Reset any configuration,

      usage: gluster-eventsapi config-reset [-h] name\n\npositional arguments:\n  name        Config Name or all\n\noptional arguments:\n  -h, --help  show this help message and exit\n

      Example output,

      +-----------+-------------+-------------+\n| NODE      | NODE STATUS | SYNC STATUS |\n+-----------+-------------+-------------+\n| localhost |          UP |          OK |\n| node2     |          UP |          OK |\n+-----------+-------------+-------------+\n

      Note: If any node status is not UP or sync status is not OK, make sure to run gluster-eventsapi sync from a peer node.

      "},{"location":"Administrator-Guide/Events-APIs/#add-node-to-the-cluster","title":"Add node to the Cluster","text":"

      When a new node is added to the cluster,

      • Enable and Start Eventsd in the new node using the steps mentioned above
      • Run gluster-eventsapi sync command from a peer node other than the new node.
      "},{"location":"Administrator-Guide/Events-APIs/#apis-documentation","title":"APIs documentation","text":"

      Glustereventsd pushes the Events in JSON format to configured Webhooks. All Events will have following attributes.

      Attribute Description nodeid Node UUID ts Event Timestamp event Event Type message Event Specific Data

      Example:

      {\n  \"nodeid\": \"95cd599c-5d87-43c1-8fba-b12821fd41b6\",\n  \"ts\": 1468303352,\n  \"event\": \"VOLUME_CREATE\",\n  \"message\": {\n    \"name\": \"gv1\"\n  }\n}\n

      \"message\" can have following attributes based on the type of event.

      "},{"location":"Administrator-Guide/Events-APIs/#peer-events","title":"Peer Events","text":"Event Type Attribute Description PEER_ATTACH host Hostname or IP of added node PEER_DETACH host Hostname or IP of detached node"},{"location":"Administrator-Guide/Events-APIs/#volume-events","title":"Volume Events","text":"Event Type Attribute Description VOLUME_CREATE name Volume Name VOLUME_START force Force option used or not during Start name Volume Name VOLUME_STOP force Force option used or not during Stop name Volume Name VOLUME_DELETE name Volume Name VOLUME_SET name Volume Name options List of Options[(key1, val1), (key2, val2),..] VOLUME_RESET name Volume Name option Option Name"},{"location":"Administrator-Guide/Events-APIs/#brick-events","title":"Brick Events","text":"Event Type Attribute Description BRICK_RESET_START volume Volume Name source-brick Source Brick details BRICK_RESET_COMMIT volume Volume Name destination-brick Destination Brick source-brick Source Brick details BRICK_REPLACE volume Volume Name destination-brick Destination Brick source-brick Source Brick details"},{"location":"Administrator-Guide/Events-APIs/#georep-events","title":"Georep Events","text":"Event Type Attribute Description GEOREP_CREATE force Force option used during session Create secondary Secondary Details(Secondaryhost::SecondaryVolume) no_verify No verify option is used or not push_pem Push pem option is used or Not ssh_port If SSH port is configured during Session Create primary Primary Volume Name GEOREP_START force Force option used during session Start Primary Primary Volume Name secondary Secondary Details(Secondaryhost::SecondaryVolume) GEOREP_STOP force Force option used during session Stop primary Primary Volume Name secondary Secondary Details(Secondaryhost::SecondaryVolume) GEOREP_PAUSE force Force option used during session Pause primary Primary Volume Name secondary Secondary Details(Secondaryhost::SecondaryVolume) GEOREP_RESUME force Force option used during session Resume primary Primary Volume Name secondary Secondary Details(Secondaryhost::SecondaryVolume) GEOREP_DELETE force Force option used during session Delete primary Primary Volume Name secondary Secondary Details(Secondaryhost::SecondaryVolume) GEOREP_CONFIG_SET primary Primary Volume Name secondary Secondary Details(Secondaryhost::SecondaryVolume) option Name of Geo-rep config value Changed Value GEOREP_CONFIG_RESET primary Primary Volume Name secondary Secondary Details(Secondaryhost::SecondaryVolume) option Name of Geo-rep config"},{"location":"Administrator-Guide/Events-APIs/#bitrot-events","title":"Bitrot Events","text":"Event Type Attribute Description BITROT_ENABLE name Volume Name BITROT_DISABLE name Volume Name BITROT_SCRUB_THROTTLE name Volume Name value Changed Value BITROT_SCRUB_FREQ name Volume Name value Changed Value BITROT_SCRUB_OPTION name Volume Name value Changed Value"},{"location":"Administrator-Guide/Events-APIs/#quota-events","title":"Quota Events","text":"Event Type Attribute Description QUOTA_ENABLE volume Volume Name QUOTA_DISABLE volume Volume Name QUOTA_SET_USAGE_LIMIT volume Volume Name path Path in Volume on which Quota option is set limit Changed Value QUOTA_SET_OBJECTS_LIMIT volume Volume Name path Path in Volume on which Quota option is set limit Changed Value QUOTA_REMOVE_USAGE_LIMIT volume Volume Name path Path in Volume on which Quota option is Reset QUOTA_REMOVE_OBJECTS_LIMIT volume Volume Name path Path in Volume on which Quota option is Reset QUOTA_ALERT_TIME volume Volume Name time 
Changed Alert Time QUOTA_SOFT_TIMEOUT volume Volume Name soft-timeout Changed Value QUOTA_HARD_TIMEOUT volume Volume Name hard-timeout Changed Value QUOTA_DEFAULT_SOFT_LIMIT volume Volume Name default-soft-limit Changed Value"},{"location":"Administrator-Guide/Events-APIs/#snapshot-events","title":"Snapshot Events","text":"Event Type Attribute Description SNAPSHOT_CREATED snapshot_name Snapshot Name volume_name Volume Name snapshot_uuid Snapshot UUID SNAPSHOT_CREATE_FAILED snapshot_name Snapshot Name volume_name Volume Name error Failure details SNAPSHOT_ACTIVATED snapshot_name Snapshot Name snapshot_uuid Snapshot UUID SNAPSHOT_ACTIVATE_FAILED snapshot_name Snapshot Name error Failure details SNAPSHOT_DEACTIVATED snapshot_name Snapshot Name snapshot_uuid Snapshot UUID SNAPSHOT_DEACTIVATE_FAILED snapshot_name Snapshot Name error Failure details SNAPSHOT_SOFT_LIMIT_REACHED volume_name Volume Name volume_id Volume ID SNAPSHOT_HARD_LIMIT_REACHED volume_name Volume Name volume_id Volume ID SNAPSHOT_RESTORED snapshot_name Snapshot Name volume_name Volume Name snapshot_uuid Snapshot UUID SNAPSHOT_RESTORE_FAILED snapshot_name Snapshot Name error Failure details SNAPSHOT_DELETED snapshot_name Snapshot Name snapshot_uuid Snapshot UUID SNAPSHOT_DELETE_FAILED snapshot_name Snapshot Name error Failure details SNAPSHOT_CLONED clone_uuid Snapshot Clone UUID snapshot_name Snapshot Name clone_name Snapshot Clone Name SNAPSHOT_CLONE_FAILED snapshot_name Snapshot Name clone_name Snapshot Clone Name error Failure details SNAPSHOT_CONFIG_UPDATED auto-delete Auto delete Value if available config_type Volume Config or System Config hard_limit Hard Limit Value if available soft_limit Soft Limit Value if available snap-activate Snap activate Value if available SNAPSHOT_CONFIG_UPDATE_FAILED error Error details SNAPSHOT_SCHEDULER_INITIALISED status Succss Status SNAPSHOT_SCHEDULER_INIT_FAILED error Error details SNAPSHOT_SCHEDULER_ENABLED status Succss Status SNAPSHOT_SCHEDULER_ENABLE_FAILED error Error details SNAPSHOT_SCHEDULER_DISABLED status Succss Status SNAPSHOT_SCHEDULER_DISABLE_FAILED error Error details SNAPSHOT_SCHEDULER_SCHEDULE_ADDED status Succss Status SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED error Error details SNAPSHOT_SCHEDULER_SCHEDULE_EDITED status Succss Status SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED error Error details SNAPSHOT_SCHEDULER_SCHEDULE_DELETED status Succss Status SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED error Error details"},{"location":"Administrator-Guide/Events-APIs/#svc-events","title":"Svc Events","text":"Event Type Attribute Description SVC_MANAGER_FAILED volume Volume Name if available svc_name Service Name SVC_CONNECTED volume Volume Name if available svc_name Service Name SVC_DISCONNECTED svc_name Service Name"},{"location":"Administrator-Guide/Events-APIs/#peer-events_1","title":"Peer Events","text":"Event Type Attribute Description PEER_STORE_FAILURE peer Hostname or IP PEER_RPC_CREATE_FAILED peer Hostname or IP PEER_REJECT peer Hostname or IP PEER_CONNECT host Hostname or IP uuid Host UUID PEER_DISCONNECT host Hostname or IP uuid Host UUID state Disconnect State PEER_NOT_FOUND peer Hostname or IP uuid Host UUID"},{"location":"Administrator-Guide/Events-APIs/#unknown-events","title":"Unknown Events","text":"Event Type Attribute Description UNKNOWN_PEER peer Hostname or IP"},{"location":"Administrator-Guide/Events-APIs/#brick-events_1","title":"Brick Events","text":"Event Type Attribute Description BRICK_START_FAILED peer Hostname or IP volume Volume Name brick Brick 
BRICK_STOP_FAILED peer Hostname or IP volume Volume Name brick Brick BRICK_DISCONNECTED peer Hostname or IP volume Volume Name brick Brick BRICK_CONNECTED peer Hostname or IP volume Volume Name brick Brick"},{"location":"Administrator-Guide/Events-APIs/#bricks-events","title":"Bricks Events","text":"Event Type Attribute Description BRICKS_START_FAILED volume Volume Name"},{"location":"Administrator-Guide/Events-APIs/#brickpath-events","title":"Brickpath Events","text":"Event Type Attribute Description BRICKPATH_RESOLVE_FAILED peer Hostname or IP volume Volume Name brick Brick"},{"location":"Administrator-Guide/Events-APIs/#notify-events","title":"Notify Events","text":"Event Type Attribute Description NOTIFY_UNKNOWN_OP op Operation Name"},{"location":"Administrator-Guide/Events-APIs/#quorum-events","title":"Quorum Events","text":"Event Type Attribute Description QUORUM_LOST volume Volume Name QUORUM_REGAINED volume Volume Name"},{"location":"Administrator-Guide/Events-APIs/#rebalance-events","title":"Rebalance Events","text":"Event Type Attribute Description REBALANCE_START_FAILED volume Volume Name REBALANCE_STATUS_UPDATE_FAILED volume Volume Name"},{"location":"Administrator-Guide/Events-APIs/#import-events","title":"Import Events","text":"Event Type Attribute Description IMPORT_QUOTA_CONF_FAILED volume Volume Name IMPORT_VOLUME_FAILED volume Volume Name IMPORT_BRICK_FAILED peer Hostname or IP brick Brick details"},{"location":"Administrator-Guide/Events-APIs/#compare-events","title":"Compare Events","text":"Event Type Attribute Description COMPARE_FRIEND_VOLUME_FAILED volume Volume Name"},{"location":"Administrator-Guide/Events-APIs/#ec-events","title":"Ec Events","text":"Event Type Attribute Description EC_MIN_BRICKS_NOT_UP subvol Subvolume EC_MIN_BRICKS_UP subvol Subvolume"},{"location":"Administrator-Guide/Events-APIs/#georep-events_1","title":"Georep Events","text":"Event Type Attribute Description GEOREP_FAULTY primary_node Hostname or IP of Primary Volume brick_path Brick Path secondary_host Secondary Hostname or IP primary_volume Primary Volume Name current_secondary_host Current Secondary Host to which Geo-rep worker was trying to connect to secondary_volume Secondary Volume Name"},{"location":"Administrator-Guide/Events-APIs/#quota-events_1","title":"Quota Events","text":"Event Type Attribute Description QUOTA_CROSSED_SOFT_LIMIT usage Usage volume Volume Name path Path"},{"location":"Administrator-Guide/Events-APIs/#bitrot-events_1","title":"Bitrot Events","text":"Event Type Attribute Description BITROT_BAD_FILE gfid GFID of File path Path if Available brick Brick details"},{"location":"Administrator-Guide/Events-APIs/#client-events","title":"Client Events","text":"Event Type Attribute Description CLIENT_CONNECT client_identifier Client Identifier client_uid Client UID server_identifier Server Identifier brick_path Path of Brick CLIENT_AUTH_REJECT client_identifier Client Identifier client_uid Client UID server_identifier Server Identifier brick_path Path of Brick CLIENT_DISCONNECT client_identifier Client Identifier client_uid Client UID server_identifier Server Identifier brick_path Path of Brick"},{"location":"Administrator-Guide/Events-APIs/#posix-events","title":"Posix Events","text":"Event Type Attribute Description POSIX_SAME_GFID gfid GFID of File path Path of File newpath New Path of File brick Brick details POSIX_ALREADY_PART_OF_VOLUME volume-id Volume ID brick Brick details POSIX_BRICK_NOT_IN_VOLUME brick Brick details POSIX_BRICK_VERIFICATION_FAILED brick Brick 
details POSIX_ACL_NOT_SUPPORTED brick Brick details POSIX_HEALTH_CHECK_FAILED path Path brick Brick details op Error Number error Error"},{"location":"Administrator-Guide/Events-APIs/#afr-events","title":"Afr Events","text":"Event Type Attribute Description AFR_QUORUM_MET subvol Sub Volume Name AFR_QUORUM_FAIL subvol Sub Volume Name AFR_SUBVOL_UP subvol Sub Volume Name AFR_SUBVOLS_DOWN subvol Sub Volume Name AFR_SPLIT_BRAIN subvol Sub Volume Name"},{"location":"Administrator-Guide/Events-APIs/#tier-events","title":"Tier Events","text":"Event Type Attribute Description TIER_ATTACH vol Volume Name TIER_ATTACH_FORCE vol Volume Name TIER_DETACH_START vol Volume Name TIER_DETACH_STOP vol Volume Name TIER_DETACH_COMMIT vol Volume Name TIER_DETACH_FORCE vol Volume Name TIER_PAUSE vol Volume Name TIER_RESUME vol Volume Name TIER_WATERMARK_HI vol Volume Name TIER_WATERMARK_DROPPED_TO_MID vol Volume Name TIER_WATERMARK_RAISED_TO_MID vol Volume Name TIER_WATERMARK_DROPPED_TO_LOW vol Volume Name"},{"location":"Administrator-Guide/Events-APIs/#volume-events_1","title":"Volume Events","text":"Event Type Attribute Description VOLUME_ADD_BRICK volume Volume Name bricks Bricks details separated by Space VOLUME_REMOVE_BRICK_START volume Volume Name bricks Bricks details separated by Space VOLUME_REMOVE_BRICK_COMMIT volume Volume Name bricks Bricks details separated by Space VOLUME_REMOVE_BRICK_STOP volume Volume Name bricks Bricks details separated by Space VOLUME_REMOVE_BRICK_FORCE volume Volume Name bricks Bricks details separated by Space VOLUME_REBALANCE_START volume Volume Name VOLUME_REBALANCE_STOP volume Volume Name VOLUME_REBALANCE_FAILED volume Volume Name VOLUME_REBALANCE_COMPLETE volume Volume Name"},{"location":"Administrator-Guide/Export-And-Netgroup-Authentication/","title":"Export and Netgroup Authentication","text":""},{"location":"Administrator-Guide/Export-And-Netgroup-Authentication/#exports-and-netgroups-authentication-for-nfs","title":"Exports and Netgroups Authentication for NFS","text":"

      This feature adds Linux-style exports & netgroups authentication to Gluster's NFS server. More specifically, this feature allows users to restrict access to specific IPs (exports authentication) or a netgroup (netgroups authentication), or a combination of both, for both Gluster volumes and subdirectories within Gluster volumes. Netgroups are used in Unix environments to control access for NFS exports, remote logins and remote shells. Each netgroup has a unique name and defines a set of hosts, users, groups and other netgroups. This information is stored in files, and the gluster NFS server manages permissions for clients based on those files.

      "},{"location":"Administrator-Guide/Export-And-Netgroup-Authentication/#implications-and-usage","title":"Implications and Usage","text":"

      Currently, gluster can restrict access to volumes through a simple IP list. This feature makes that capability more scalable by allowing large lists of IPs to be managed through a netgroup. Moreover, it provides more granular permission handling on volumes, like wildcard support, read-only permission for certain clients, etc.

      The file /var/lib/glusterd/nfs/export contains the details of the machines which can be used as clients for that server. A typical export entry uses the following format:

      /<export path> <host/netgroup> (options),..\n

      Here the export path can be a gluster volume or a subdirectory path inside that volume. Next the entry contains the list of hosts/netgroups, followed by the options applicable to that entry. A string beginning with an '@' is treated as a netgroup and a string without an '@' is a host. The options include mount-related parameters; right now options such as \"sec\", \"ro/rw\" and \"anonuid\" are valid. If * is mentioned in the host/netgroup field, then any client can mount that export path.
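
      For illustration, a hypothetical export file following the format above could look like this (the netgroup name and IP address are made up):

      /test-volume      @test-netgroup(sec=sys,rw,anonuid=0) 10.1.1.2(sec=sys,ro)\n/test-volume/data *(rw)\n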

      The file /var/lib/glusterd/nfs/netgroup should contain the expansion of each netgroup mentioned in the export file. A typical netgroup entry looks like:

      <netgroup name> ng1000\\nng1000 ng999\\nng999 ng1\\nng1 ng2\\nng2 (ip1,ip2,..)\n

      The gluster NFS server will re-check the contents of these files at specific time intervals.

      "},{"location":"Administrator-Guide/Export-And-Netgroup-Authentication/#volume-options","title":"Volume Options","text":"
      1. Enabling export/netgroup feature
      gluster volume set <volname> nfs.exports-auth-enable on\n
      2. Changing the refresh interval for gluster NFS server
      gluster volume set <volname> nfs.auth-refresh-interval-sec <time in seconds>\n
      3. Changing the cache interval for an export entry
      gluster volume set <volname> nfs.auth-cache-ttl-sec <time in seconds>\n
      "},{"location":"Administrator-Guide/Export-And-Netgroup-Authentication/#testing-the-exportnetgroup-file","title":"Testing the export/netgroup file","text":"

      A user should be able to check the validity of the files before applying the configuration. The \"glusterfsd\" command now has the following additional arguments that can be used to check the configuration:

      • --print-netgroups: Validate the netgroups file and print it out. For example,

      • glusterfsd --print-netgroups <name of the file>

      • --print-exports: Validate the exports file and print it out. For example,

      • glusterfsd --print-exports <name of the file>
      "},{"location":"Administrator-Guide/Export-And-Netgroup-Authentication/#points-to-be-noted","title":"Points to be noted.","text":"
      1. This feature does not currently support all the options in the man page of exports, but we can easily add them.

      2. The files /var/lib/glusterd/nfs/export and /var/lib/glusterd/nfs/netgroup should be created before setting the nfs.exports-auth-enable option in every node in Trusted Storage Pool.

      3. These files are managed manually by the users, so their contents can differ among the gluster NFS servers across the Trusted Storage Pool, i.e. it is possible to have a different authentication mechanism for the gluster NFS servers in the same cluster.

      4. Do not mix up this feature with authentication using nfs.rpc-auth-allow and nfs.export-dir, which may result in inconsistency.

      "},{"location":"Administrator-Guide/Export-And-Netgroup-Authentication/#troubleshooting","title":"Troubleshooting","text":"

      If, after changing the contents of the files, the change is not reflected properly in the authentication mechanism, just restart the server using volume stop and start, so that the gluster NFS server will forcefully re-read the contents of those files.
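
      For example (the volume name is illustrative):

      gluster volume stop test-volume\ngluster volume start test-volume\n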

      "},{"location":"Administrator-Guide/Geo-Replication/","title":"Geo-Replication","text":""},{"location":"Administrator-Guide/Geo-Replication/#introduction","title":"Introduction","text":"

      Geo-replication provides a continuous, asynchronous, and incremental replication service from one site to another over Local Area Networks (LANs), Wide Area Network (WANs), and across the Internet.

      "},{"location":"Administrator-Guide/Geo-Replication/#prerequisites","title":"Prerequisites","text":"
      • Primary and Secondary Volumes should be Gluster Volumes.
      • Primary and Secondary clusters should have the same GlusterFS version.
      "},{"location":"Administrator-Guide/Geo-Replication/#replicated-volumes-vs-geo-replication","title":"Replicated Volumes vs Geo-replication","text":"

      The following table lists the difference between replicated volumes and Geo-replication:

      Replicated Volumes Geo-replication Mirrors data across clusters Mirrors data across geographically distributed clusters Provides high-availability Ensures backing up of data for disaster recovery Synchronous replication (each and every file operation is sent across all the bricks) Asynchronous replication (checks for the changes in files periodically and syncs them on detecting differences)"},{"location":"Administrator-Guide/Geo-Replication/#exploring-geo-replication-deployment-scenarios","title":"Exploring Geo-replication Deployment Scenarios","text":"

      Geo-replication provides an incremental replication service over Local Area Networks (LANs), Wide Area Network (WANs), and across the Internet.

      This section illustrates the most common deployment scenarios for Geo-replication, including the following:

      "},{"location":"Administrator-Guide/Geo-Replication/#geo-replication-over-local-area-networklan","title":"Geo-replication over Local Area Network(LAN)","text":""},{"location":"Administrator-Guide/Geo-Replication/#geo-replication-over-wide-area-networkwan","title":"Geo-replication over Wide Area Network(WAN)","text":""},{"location":"Administrator-Guide/Geo-Replication/#geo-replication-over-internet","title":"Geo-replication over Internet","text":""},{"location":"Administrator-Guide/Geo-Replication/#mirror-data-in-a-cascading-fashion-across-multiple-sitesmulti-site-cascading-geo-replication","title":"Mirror data in a cascading fashion across multiple sites(Multi-site cascading Geo-replication)","text":""},{"location":"Administrator-Guide/Geo-Replication/#secondary-user-setup","title":"Secondary User setup","text":"

      Set up an unprivileged user on the Secondary nodes to secure the SSH connectivity to those nodes. The unprivileged Secondary user uses the mountbroker service of glusterd to set up an auxiliary gluster mount for the user in a special environment, which ensures that the user is only allowed access with special parameters that provide administrative-level access to the particular Volume.

      On all the Secondary nodes, create a new group named \"geogroup\".

      sudo groupadd geogroup\n

      In all the Secondary nodes, create an unprivileged account. For example, \"geoaccount\". Add geoaccount as a member of \"geogroup\" group.

      useradd -G geogroup geoaccount\n

      In any one Secondary node, run the following command to setup the mountbroker root directory and group.

      gluster-mountbroker setup <MOUNT ROOT> <GROUP>\n

      For example,

      gluster-mountbroker setup /var/mountbroker-root geogroup\n

      In any one of Secondary node, Run the following commands to add Volume and user to mountbroker service.

      gluster-mountbroker add <VOLUME> <USER>\n

      For example,

      gluster-mountbroker add gvol-secondary geoaccount\n

      (Note: To remove a user, use gluster-mountbroker remove command)

      Check the status of setup using,

      gluster-mountbroker status\n

      Restart glusterd service on all Secondary nodes.

      "},{"location":"Administrator-Guide/Geo-Replication/#setting-up-the-environment-for-geo-replication","title":"Setting Up the Environment for Geo-replication","text":""},{"location":"Administrator-Guide/Geo-Replication/#time-synchronization","title":"Time Synchronization","text":"

      On the bricks of a geo-replication Primary volume, the time on all servers must be uniform. It is recommended to set up NTP (Network Time Protocol) or a similar service to keep the bricks in sync in time and avoid out-of-time-sync effects.

      For example: in a Replicated volume where brick1 of the Primary is at 12.20 hrs and brick2 of the Primary is at 12.10 hrs, with a 10-minute time lag, all the changes in brick2 during this period may go unnoticed during synchronization of files with the Secondary.
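
      For example, on systemd-based distributions you might enable an NTP implementation such as chrony (the package and service names are distribution-dependent):

      systemctl enable chronyd\nsystemctl start chronyd\n# verify that the clock is synchronized\nchronyc tracking\n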

      "},{"location":"Administrator-Guide/Geo-Replication/#password-less-ssh","title":"Password-less SSH","text":"

      Password-less login has to be set up between the host machine (where the geo-replication Create command will be issued) and one of the Secondary nodes for the unprivileged account created above.

      Note: This is required to run the Create command. It can be disabled once the session is established (it is required again while running create force).

      On one of the Primary node where geo-replication Create command will be issued, run the following command to generate the SSH key(Press Enter twice to avoid passphrase).

      ssh-keygen\n

      Run the following command on the same node to one Secondary node which is identified as the main Secondary node.

      ssh-copy-id geoaccount@snode1.example.com\n
      "},{"location":"Administrator-Guide/Geo-Replication/#creating-secret-pem-pub-file","title":"Creating secret pem pub file","text":"

      Execute the command below from the node where you set up the password-less SSH to the Secondary. This generates Geo-rep session-specific SSH keys on all Primary peer nodes and collects the public keys from all peer nodes onto the node where the command is run.

      gluster-georep-sshkey generate\n

      This command adds an extra prefix to each public key inside the common_secret.pem.pub file, which prevents the key from being used to run arbitrary commands. To disable that prefix,

      gluster-georep-sshkey generate --no-prefix\n
      "},{"location":"Administrator-Guide/Geo-Replication/#creating-the-session","title":"Creating the session","text":"

      Create a geo-rep session between the Primary and Secondary volume using the following command. The node on which this command is executed and the <Secondary_host> specified in the command should have password-less SSH set up between them. The push-pem option uses the secret pem pub file created earlier and establishes geo-rep-specific password-less SSH between each node in the Primary and each node of the Secondary.

      gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> \\\n    create [ssh-port <port>] push-pem|no-verify [force]\n

      For example,

      gluster volume geo-replication gvol-primary \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  create push-pem\n

      If a custom SSH port (for example, 50022) is configured on the Secondary nodes, then

      gluster volume geo-replication gvol-primary  \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  config ssh_port 50022\n\ngluster volume geo-replication gvol-primary  \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  create ssh-port 50022 push-pem\n

      If the total available size of the Secondary volume is less than the total size of the Primary, the command will throw an error message. In such cases the force option can be used.
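      Following the syntax shown above, a forced creation could look like:

      gluster volume geo-replication gvol-primary \
        geoaccount@snode1.example.com::gvol-secondary \
        create push-pem force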

      In use cases where the RSA keys of the Primary volume's nodes are distributed to the Secondary nodes through an external agent, and the following Secondary-side verifications are taken care of by that external agent:

      • SSH port 22 or the custom port is open on the Secondary
      • proper password-less SSH login is set up
      • the Secondary volume is created and is empty
      • the Secondary has enough memory

      then use the following command to create the Geo-rep session with the no-verify option.

      gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> create no-verify [force]\n

      For example,

      gluster volume geo-replication gvol-primary  \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  create no-verify\n

      In this case the distribution of the Primary nodes' RSA keys to the Secondary nodes does not happen, and the Secondary-side verification mentioned above is not performed; both have to be taken care of externally.

      "},{"location":"Administrator-Guide/Geo-Replication/#post-creation-steps","title":"Post Creation steps","text":"

      Run the following command as root on any one of the Secondary nodes.

      /usr/libexec/glusterfs/set_geo_rep_pem_keys.sh <secondary_user> \\\n    <primary_volume> <secondary_volume>\n

      For example,

      /usr/libexec/glusterfs/set_geo_rep_pem_keys.sh geoaccount \\\n  gvol-primary gvol-secondary\n
      "},{"location":"Administrator-Guide/Geo-Replication/#configuration","title":"Configuration","text":"

      Configuration can be changed anytime after creating the session. After successful configuration change, Geo-rep session will be automatically restarted.

      To view all configured options of a session,

      gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> config [option]\n

      For example,

      gluster volume geo-replication gvol-primary  \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  config\n\ngluster volume geo-replication gvol-primary  \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  config sync-jobs\n

      To configure Gluster Geo-replication, use the following command at the Gluster command line

      gluster volume geo-replication <primary_volume> \\\n   <secondary_user>@<secondary_host>::<secondary_volume> config [option]\n

      For example:

      gluster volume geo-replication gvol-primary  \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  config sync-jobs 3\n

      Note: If Geo-rep is in the middle of syncing, a restart due to a configuration change may cause a few entries that are already synced to be resynced.

      "},{"location":"Administrator-Guide/Geo-Replication/#configurable-options","title":"Configurable Options","text":"

      Meta Volume

      In the case of replica bricks, one brick worker will be Active and participate in syncing while the others wait as Passive. By default Geo-rep uses the node-uuid: if the node-uuid of a worker is present in the node-id list of the first up subvolume, that worker becomes Active. With this method, multiple workers of the same replica can become Active if multiple bricks of that replica are hosted on the same machine.

      To prevent this, a Meta Volume (an extra Gluster volume) can be used in Geo-rep. With this method, each worker tries to acquire a lock on a file inside the meta volume. The lock file name pattern is different for each subvolume. If a worker acquires the lock, it becomes Active; otherwise it remains Passive.

      gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> config\n    use-meta-volume true\n

      Note: The Meta Volume is a shared replica 3 Gluster volume. The name of the meta-volume should be gluster_shared_storage and it should be mounted at /var/run/gluster/shared_storage/.
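      If the shared storage volume does not exist yet, it can usually be provisioned with the built-in shared-storage option; this is a sketch, and it creates and mounts gluster_shared_storage on the cluster nodes:

      gluster volume set all cluster.enable-shared-storage enable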

      The following options are configurable for a geo-replication session:

      • log-level LOGFILELEVEL: The log level for geo-replication.
      • gluster-log-level LOGFILELEVEL: The log level for glusterfs processes.
      • changelog-log-level LOGFILELEVEL: The log level for Changelog processes.
      • ssh-command COMMAND: The SSH command used to connect to the remote machine (the default is ssh). If ssh is installed in a custom location, that path can be configured, for example /usr/local/sbin/ssh.
      • rsync-command COMMAND: The rsync command to use for synchronizing the files (the default is rsync).
      • use-tarssh true: Allows tar over the Secure Shell protocol. Use this option to handle workloads of files that have not undergone edits.
      • timeout SECONDS: The timeout period in seconds.
      • sync-jobs N: The number of simultaneous files/directories that can be synchronized.
      • ignore-deletes: If this option is set to 1, a file deleted on the primary will not trigger a delete operation on the secondary. As a result, the secondary will remain a superset of the primary and can be used to recover the primary in the event of a crash and/or accidental delete.

      "},{"location":"Administrator-Guide/Geo-Replication/#starting-geo-replication","title":"Starting Geo-replication","text":"

      Use the following command to start a geo-replication session,

      gluster volume geo-replication <primary_volume>  \\\n    <secondary_user>@<secondary_host>::<secondary_volume> \\\n    start [force]\n

      For example,

      gluster volume geo-replication gvol-primary  \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  start\n

      Note

      You may need to configure the session before starting Gluster Geo-replication.

      "},{"location":"Administrator-Guide/Geo-Replication/#stopping-geo-replication","title":"Stopping Geo-replication","text":"

      Use the following command to stop a geo-replication session,

      gluster volume geo-replication <primary_volume>  \\\n    <secondary_user>@<secondary_host>::<secondary_volume> \\\n    stop [force]\n

      For example,

      gluster volume geo-replication gvol-primary  \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  stop\n
      "},{"location":"Administrator-Guide/Geo-Replication/#status","title":"Status","text":"

      To check the status of all Geo-replication sessions in the Cluster

      gluster volume geo-replication status\n

      To check the status of one session,

      gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> status [detail]\n

      Example,

      gluster volume geo-replication gvol-primary \\\n  geoaccount@snode1::gvol-secondary status\n\ngluster volume geo-replication gvol-primary \\\n  geoaccount@snode1::gvol-secondary status detail\n

      Example Status Output

      PRIMARY NODE    PRIMARY VOL          PRIMARY BRICK    SECONDARY USER    SECONDARY         SECONDARY NODE    STATUS    CRAWL STATUS       LAST_SYNCED\n---------------------------------------------------------------------------------------------------------------------------------------------------------\nmnode1         gvol-primary           /bricks/b1      root          snode1::gvol-secondary  snode1        Active    Changelog Crawl    2016-10-12 23:07:13\nmnode2         gvol-primary           /bricks/b2      root          snode1::gvol-secondary  snode2        Active    Changelog Crawl    2016-10-12 23:07:13\n

      Example Status detail Output

      PRIMARY NODE    PRIMARY VOL    PRIMARY BRICK    SECONDARY USER    SECONDARY        SECONDARY NODE    STATUS    CRAWL STATUS       LAST_SYNCED            ENTRY    DATA    META    FAILURES    CHECKPOINT TIME    CHECKPOINT COMPLETED    CHECKPOINT COMPLETION TIME\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nmnode1         gvol-primary           /bricks/b1      root          snode1::gvol-secondary  snode1        Active    Changelog Crawl    2016-10-12 23:07:13    0        0       0       0           N/A                N/A                     N/A\nmnode2         gvol-primary           /bricks/b2      root          snode1::gvol-secondary  snode2        Active    Changelog Crawl    2016-10-12 23:07:13    0        0       0       0           N/A                N/A                     N/A\n

      The STATUS of the session could be one of the following,

      • Initializing: This is the initial phase of the Geo-replication session; it remains in this state for a minute in order to make sure no abnormalities are present.

      • Created: The geo-replication session is created, but not started.

      • Active: The gsync daemon in this node is active and syncing the data. (One worker among the replica pairs will be in Active state)

      • Passive: A replica pair of the active node. Data synchronization is handled by the active node, so this node does not sync any data. If the Active node goes down, the Passive worker will become Active.

      • Faulty: The geo-replication session has experienced a problem, and the issue needs to be investigated further. Check log files for more details about the Faulty status. Log file path can be found using

        gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> config log-file\n
      • Stopped: The geo-replication session has stopped, but has not been deleted.

      The CRAWL STATUS can be one of the following:

      • Hybrid Crawl: The gsyncd daemon is crawling the glusterFS file system and generating a pseudo changelog to sync data. This crawl is used during the initial sync and when Changelogs are not available.

      • History Crawl: gsyncd daemon syncs data by consuming Historical Changelogs. On every worker restart, Geo-rep uses this Crawl to process backlog Changelogs.

      • Changelog Crawl: The changelog translator has produced the changelog and that is being consumed by gsyncd daemon to sync data.

      The ENTRY denotes: The number of pending entry operations (create, mkdir, mknod, symlink, link, rename, unlink, rmdir) per session.

      The DATA denotes: The number of pending Data operations (write, writev, truncate, ftruncate) per session.

      The META denotes: The number of pending Meta operations (setattr, fsetattr, setxattr, fsetxattr, removexattr, fremovexattr) per session.

      The FAILURES column denotes: The number of failures per session. On encountering failures, one can proceed to look at the log files.

      "},{"location":"Administrator-Guide/Geo-Replication/#deleting-the-session","title":"Deleting the session","text":"

      An established Geo-replication session can be deleted using the following command,

      gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> delete [force]\n

      For example,

      gluster volume geo-replication gvol-primary \\\n  geoaccount@snode1.example.com::gvol-secondary delete\n

      Note: If the same session is created again, syncing will resume from where it stopped before the session was deleted. If the session is to be deleted permanently, use the reset-sync-time option with the delete command. For example, gluster volume geo-replication gvol-primary geoaccount@snode1::gvol-secondary delete reset-sync-time

      "},{"location":"Administrator-Guide/Geo-Replication/#checkpoint","title":"Checkpoint","text":"

      Using the Checkpoint feature you can find the status of synchronization with respect to the Checkpoint time. The Checkpoint completion status shows \"Yes\" once Geo-rep has synced all the data from a brick that was created or modified before the Checkpoint time.

      Set the Checkpoint using,

      gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> config checkpoint now\n

      Example,

      gluster volume geo-replication gvol-primary \\\n  geoaccount@snode1.example.com::gvol-secondary \\\n  config checkpoint now\n

      Touch the Primary mount point to make sure the Checkpoint completes even if no I/O is happening on the volume

      mount -t glusterfs <primaryhost>:<primaryvol> /mnt\ntouch /mnt\n

      Checkpoint status can be checked using the Geo-rep status command. The following columns in the status output give more information about the Checkpoint

      • CHECKPOINT TIME: Checkpoint Set Time
      • CHECKPOINT COMPLETED: Yes/No/NA, Status of Checkpoint
      • CHECKPOINT COMPLETION TIME: Checkpoint Completion Time if completed, else N/A
      "},{"location":"Administrator-Guide/Geo-Replication/#log-files","title":"Log Files","text":"

      Primary log files are located in the /var/log/glusterfs/geo-replication directory on each Primary node. Secondary log files are located in the /var/log/glusterfs/geo-replication-secondary directory on the Secondary nodes.

      "},{"location":"Administrator-Guide/Geo-Replication/#gluster-snapshots-and-geo-replicated-volumes","title":"Gluster Snapshots and Geo-replicated Volumes","text":"

      Gluster snapshots of the Primary and Secondary should not go out of order on restore, so when taking snapshots, snapshot both the Primary and Secondary volumes.

      • Pause the Geo-replication session using,

        gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> pause\n
      • Take a Gluster snapshot of the Secondary volume and the Primary volume (use the same name for both snapshots)

        gluster snapshot create <snapname> <volname>\n

      Example,

          gluster snapshot create snap1 gvol-secondary\n    gluster snapshot create snap1 gvol-primary\n
      • Resume Geo-replication session using,
        gluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> resume\n

      To continue the Geo-rep session after a snapshot restore, restore both the Primary and Secondary volumes and resume the Geo-replication session using the force option

      gluster snapshot restore <snapname>\ngluster volume geo-replication <primary_volume> \\\n    <secondary_user>@<secondary_host>::<secondary_volume> resume force\n

      Example,

      gluster snapshot restore snap1 # Secondary Snap\ngluster snapshot restore snap1 # Primary Snap\ngluster volume geo-replication gvol-primary geoaccount@snode1::gvol-secondary \\\n  resume force\n
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/","title":"Gluster On ZFS","text":""},{"location":"Administrator-Guide/Gluster-On-ZFS/#gluster-on-zfs","title":"Gluster On ZFS","text":"

      This is a step-by-step set of instructions to install Gluster on top of ZFS as the backing file store. There are some commands which were specific to my installation, specifically, the ZFS tuning section. You have been warned.

      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#preparation","title":"Preparation","text":"
      • Install CentOS 6.3
      • Assumption is that your hostname is gfs01
      • Run all commands as the root user
      • yum update
      • Disable IP Tables
      chkconfig\u00a0iptables\u00a0off\nservice\u00a0iptables\u00a0stop\n
      • Disable SELinux
      1. edit `/etc/selinux/config`\n2. set `SELINUX=disabled`\n3. reboot\n
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#install-zfs-on-linux","title":"Install ZFS on Linux","text":"

      For RHEL6 or 7 and derivatives, you can install the ZFSoL repo (and EPEL) and use that to install ZFS

      • RHEL\u00a06:
      yum\u00a0localinstall\u00a0--nogpgcheck\u00a0https://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm\nyum\u00a0localinstall\u00a0--nogpgcheck\u00a0http://archive.zfsonlinux.org/epel/zfs-release.el6.noarch.rpm\nyum\u00a0install\u00a0kernel-devel\u00a0zfs\n
      • RHEL\u00a07:
      yum\u00a0localinstall\u00a0--nogpgcheck\u00a0https://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-2.noarch.rpm\nyum\u00a0localinstall\u00a0--nogpgcheck\u00a0http://archive.zfsonlinux.org/epel/zfs-release.el7.noarch.rpm\nyum\u00a0install\u00a0kernel-devel\u00a0zfs\n

      and skip to Finish ZFS Configuration below.

      Or you can roll your own if you want specific patches:

      yum\u00a0groupinstall\u00a0\"Development\u00a0Tools\"\n
      • Download & unpack latest SPL and ZFS tarballs from zfsonlinux.org
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#install-dkms","title":"Install DKMS","text":"

      We want to automatically rebuild the kernel modules when we upgrade the kernel, so you definitely want DKMS with ZFS on Linux.

      • Download latest RPM from http://linux.dell.com/dkms
      • Install DKMS
      rpm -Uvh dkms*.rpm\n
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#build-install-spl","title":"Build & Install SPL","text":"
      • Enter SPL source directory
      • The following commands create two source & three binary RPMs. Remove the static module RPM (we are using DKMS) and install the rest:
      ./configure\nmake\u00a0rpm\nrm\u00a0spl-modules-0.6.0*.x86_64.rpm\nrpm\u00a0-Uvh\u00a0spl*.x86_64.rpm\u00a0spl*.noarch.rpm\n
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#build-install-zfs","title":"Build & Install ZFS","text":"

      Notice: If you plan to use the xattr=sa filesystem option, make sure you have the ZFS fix for https://github.com/zfsonlinux/zfs/issues/1648 so your symlinks don't get corrupted. (applies to ZFSoL before 0.6.3, xattr=sa is safe to use on 0.6.3 and later)
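      If you decide to use it on a fixed version, enabling it on the pool created later in this guide (named sp1 there) would look like:

      zfs set xattr=sa sp1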

      • Enter ZFS source directory
      • The following commands create two source & five binary RPMs. Remove the static module RPM and install the rest. Note we have a few preliminary packages to install before we can compile.
      yum\u00a0install\u00a0zlib-devel\u00a0libuuid-devel\u00a0libblkid-devel\u00a0libselinux-devel\u00a0parted\u00a0lsscsi\n./configure\nmake\u00a0rpm\nrm\u00a0zfs-modules-0.6.0*.x86_64.rpm\nrpm\u00a0-Uvh\u00a0zfs*.x86_64.rpm\u00a0zfs*.noarch.rpm\n
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#finish-zfs-configuration","title":"Finish ZFS Configuration","text":"
      • Reboot to allow all changes to take effect, if desired
      • Create the ZFS storage pool; in the examples below it will be named sp1. This is a simple example of 4 HDDs in RAID10. NOTE: check the latest ZFS on Linux FAQ about configuring the /etc/zfs/zdev.conf file. You want to create mirrored devices across controllers to maximize performance. Make sure to run udevadm trigger after creating zdev.conf.
      zpool\u00a0create\u00a0-f\u00a0sp1\u00a0mirror\u00a0A0\u00a0B0\u00a0mirror\u00a0A1\u00a0B1\nzpool\u00a0status\u00a0sp1\ndf\u00a0-h\n
      • You should see the /sp1 mount point
      • Enable ZFS compression to save disk space:

      zfs set compression=on sp1

      • You can also use lz4 compression on later versions of ZFS, as it can be faster, especially for incompressible workloads. It is safe to change this on the fly, as ZFS will compress new data with the current setting:

      zfs set compression=lz4 sp1

      • Set ZFS tunables. This is specific to my environment.
      • Set ARC cache min to 33% and max to 75% of installed RAM. Since this is a dedicated storage node, I can get away with this. In my case my servers have 24G of RAM. More RAM is better with ZFS.
      • We use SATA drives which do not accept command tagged queuing, therefore set the min and max pending requests to 1
      • Disable read prefetch because it is almost completely useless and does nothing in our environment but work the drives unnecessarily. I see < 10% prefetch cache hits, so it's really not required and actually hurts performance.
      • Set transaction group timeout to 5 seconds to prevent the volume from appearing to freeze due to a large batch of writes. 5 seconds is the default, but safe to force this.
      • Ignore client flush/sync commands; let ZFS handle this with the transaction group timeout flush. NOTE: Requires a UPS backup solution unless you don't mind losing that 5 seconds worth of data.
      echo\u00a0\"options\u00a0zfs\u00a0zfs_arc_min=8G\u00a0zfs_arc_max=18G\u00a0zfs_vdev_min_pending=1\u00a0zfs_vdev_max_pending=1\u00a0zfs_prefetch_disable=1\u00a0zfs_txg_timeout=5\"\u00a0>\u00a0/etc/modprobe.d/zfs.conf\nreboot\n
      • Setting the acltype property to posixacl indicates Posix ACLs should be used.

      zfs set acltype=posixacl sp1

      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#install-glusterfs","title":"Install GlusterFS","text":"
      wget\u00a0-P\u00a0/etc/yum.repos.d\u00a0http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/glusterfs-epel.repo\nyum\u00a0install\u00a0glusterfs{-fuse,-server}\nservice\u00a0glusterd\u00a0start\nservice\u00a0glusterd\u00a0status\nchkconfig\u00a0glusterd\u00a0on\n
      • Continue with your GFS peer probe, volume creation, etc.
      • To mount GFS volumes automatically after reboot, add these lines to /etc/rc.local (assuming your gluster volume is called export and your desired mount point is /export):
      #\u00a0Mount\u00a0GFS\u00a0Volumes\nmount\u00a0-t\u00a0glusterfs\u00a0gfs01:/export\u00a0/export\n
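      Alternatively, an /etc/fstab entry with the _netdev option is a common way to achieve the same; a sketch assuming the host, volume and mount point used above:

      gfs01:/export  /export  glusterfs  defaults,_netdev  0 0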
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#miscellaneous-notes-todo","title":"Miscellaneous Notes & TODO","text":""},{"location":"Administrator-Guide/Gluster-On-ZFS/#daily-e-mail-status-reports","title":"Daily e-mail status reports","text":"

      Python script source; put your desired e-mail address in the toAddr variable. Add a crontab entry to run this daily.

      #!/usr/bin/python\n'''\nSend e-mail to given user with zfs status\n'''\nimport datetime\nimport socket\nimport smtplib\nimport subprocess\n\n\ndef doShellCmd(cmd):\n    '''execute system shell command, return output as string'''\n    subproc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n    cmdOutput = subproc.communicate()[0]\n    return cmdOutput\n\nhostname = socket.gethostname()\nstatusLine = \"Status of \" + hostname + \" at \" + str(datetime.datetime.now())\nzpoolList = doShellCmd('zpool list')\nzpoolStatus = doShellCmd('zpool status')\nzfsList = doShellCmd('zfs list')\nreport = (statusLine + \"\\n\" +\n    \"-----------------------------------------------------------\\n\" +\n    zfsList +\n    \"-----------------------------------------------------------\\n\" +\n    zpoolList +\n    \"-----------------------------------------------------------\\n\" +\n    zpoolStatus)\n\nfromAddr = \"From: root@\" + hostname + \"\\r\\n\"\ntoAddr = \"To: user@your.com\\r\\n\"\nsubject = \"Subject: ZFS Status from \" + hostname + \"\\r\\n\"\nmsg = (subject + report)\nserver = smtplib.SMTP('localhost')\nserver.set_debuglevel(1)\nserver.sendmail(fromAddr, toAddr, msg)\nserver.quit()\n\n
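      A possible crontab entry to run it daily (the script path is hypothetical; adjust it to wherever you saved the script):

      # minute hour day-of-month month day-of-week command
      0 6 * * * /usr/local/sbin/zfs-status-report.py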
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#restoring-files-from-zfs-snapshots","title":"Restoring files from ZFS Snapshots","text":"

      Show which node a file is on (for restoring files from ZFS snapshots):

       getfattr\u00a0-n\u00a0trusted.glusterfs.pathinfo\u00a0<file>\n
      "},{"location":"Administrator-Guide/Gluster-On-ZFS/#recurring-zfs-snapshots","title":"Recurring ZFS Snapshots","text":"

      Since the community site will not let me actually post the script due to some random bug with Akismet spam blocking, I'll just post links instead.

      • Recurring ZFS Snapshots
      • Or use https://github.com/zfsonlinux/zfs-auto-snapshot
      "},{"location":"Administrator-Guide/GlusterFS-Cinder/","title":"Accessing GlusterFS using Cinder Hosts","text":"

      Note: GlusterFS driver was removed from Openstack since Ocata. This guide applies only to older Openstack releases.

      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#1-introduction","title":"1. Introduction","text":"

      GlusterFS and Cinder integration provides a system for data storage that enables users to access the same data, both as an object and as a file, thus simplifying management and controlling storage costs.

      GlusterFS - GlusterFS is an open source, distributed file system capable of scaling to several petabytes and handling thousands of clients. GlusterFS clusters together storage building blocks over Infiniband RDMA or TCP/IP interconnect, aggregating disk and memory resources and managing data in a single global namespace. GlusterFS is based on a stackable user space design and can deliver exceptional performance for diverse workloads.

      Cinder - Cinder is the OpenStack service which is responsible for handling persistent storage for virtual machines. This is persistent block storage for the instances running in Nova. Snapshots can be taken for backing up and data, either for restoring data, or to be used to create new block storage volumes.

      With Enterprise Linux 6, configuring OpenStack Grizzly to use GlusterFS for its Cinder (block) storage is fairly simple.

      These instructions have been tested with both GlusterFS 3.3 and GlusterFS 3.4. Other releases may also work, but have not been tested.

      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#2-prerequisites","title":"2. Prerequisites","text":""},{"location":"Administrator-Guide/GlusterFS-Cinder/#glusterfs","title":"GlusterFS","text":"

      For information on prerequisites and instructions for installing GlusterFS, see http://www.gluster.org/community/documentation/index.php.

      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#cinder","title":"Cinder","text":"

      For information on prerequisites and instructions for installing Cinder, see http://docs.openstack.org/.

      Before beginning, you must ensure there are no existing volumes in Cinder. Use \"cinder delete\" to remove any, and \"cinder list\" to verify that they are deleted. If you do not delete the existing cinder volumes, it will cause errors later in the process, breaking your Cinder installation.
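      A sketch of that cleanup (volume IDs come from the list output):

      cinder list
      cinder delete <VOLUME_ID>
      cinder list   # verify the volume is gone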

      NOTE - Unlike other software, the \"openstack-config\" and \"cinder\" commands generally require you to run them as a root user. Without prior configuration, running them through sudo generally does not work. (This can be changed, but is beyond the scope of this HOW-TO.)

      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#3-installing-glusterfs-client-on-cinder-hosts","title":"3 Installing GlusterFS Client on Cinder hosts","text":"

      On each Cinder host, install the GlusterFS client packages:

      sudo\u00a0yum\u00a0-y\u00a0install\u00a0glusterfs-fuse\n
      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#4-configuring-cinder-to-add-glusterfs","title":"4. Configuring Cinder to Add GlusterFS","text":"

      On each Cinder host, run the following commands to add GlusterFS to the Cinder configuration:

      openstack-config --set\u00a0/etc/cinder/cinder.conf\u00a0DEFAULT\u00a0volume_driver\u00a0cinder.volume.drivers.glusterfs.GlusterfsDriver\nopenstack-config\u00a0--set\u00a0/etc/cinder/cinder.conf\u00a0DEFAULT\u00a0glusterfs_shares_config\u00a0/etc/cinder/shares.conf\nopenstack-config\u00a0--set\u00a0/etc/cinder/cinder.conf\u00a0DEFAULT\u00a0glusterfs_mount_point_base\u00a0/var/lib/cinder/volumes\n
      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#5-creating-glusterfs-volume-list","title":"5. Creating GlusterFS Volume List","text":"

      On each of the Cinder nodes, create a simple text file /etc/cinder/shares.conf.

      This file is a simple list of the GlusterFS volumes to be used, one per line, using the following format:

      GLUSTERHOST:VOLUME\nGLUSTERHOST:NEXTVOLUME\nGLUSTERHOST2:SOMEOTHERVOLUME\n

      For example:

      myglusterbox.example.org:myglustervol\n
      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#6-updating-firewall-for-glusterfs","title":"6. Updating Firewall for GlusterFS","text":"

      You must update the firewall rules on each Cinder node to communicate with the GlusterFS nodes.

      The ports to open are explained in Step 3:

      https://docs.gluster.org/en/latest/Install-Guide/Install/

      If you are using iptables as your firewall, these lines can be added under :OUTPUT ACCEPT in the \"*filter\" section. You should probably adjust them to suit your environment (e.g. only accept connections from your GlusterFS servers).

      -A\u00a0INPUT\u00a0-m\u00a0state\u00a0--state\u00a0NEW\u00a0-m\u00a0tcp\u00a0-p\u00a0tcp\u00a0--dport\u00a0111\u00a0-j\u00a0ACCEPT\n-A\u00a0INPUT\u00a0-m\u00a0state\u00a0--state\u00a0NEW\u00a0-m\u00a0tcp\u00a0-p\u00a0tcp\u00a0--dport\u00a024007\u00a0-j\u00a0ACCEPT\n-A\u00a0INPUT\u00a0-m\u00a0state\u00a0--state\u00a0NEW\u00a0-m\u00a0tcp\u00a0-p\u00a0tcp\u00a0--dport\u00a024008\u00a0-j\u00a0ACCEPT\n-A\u00a0INPUT\u00a0-m\u00a0state\u00a0--state\u00a0NEW\u00a0-m\u00a0tcp\u00a0-p\u00a0tcp\u00a0--dport\u00a024009\u00a0-j\u00a0ACCEPT\n-A\u00a0INPUT\u00a0-m\u00a0state\u00a0--state\u00a0NEW\u00a0-m\u00a0tcp\u00a0-p\u00a0tcp\u00a0--dport\u00a024010\u00a0-j\u00a0ACCEPT\n-A\u00a0INPUT\u00a0-m\u00a0state\u00a0--state\u00a0NEW\u00a0-m\u00a0tcp\u00a0-p\u00a0tcp\u00a0--dport\u00a024011\u00a0-j\u00a0ACCEPT\n-A\u00a0INPUT\u00a0-m\u00a0state\u00a0--state\u00a0NEW\u00a0-m\u00a0tcp\u00a0-p\u00a0tcp\u00a0--dport\u00a038465:38469\u00a0-j\u00a0ACCEPT\n

      Restart the firewall service:

      sudo\u00a0service\u00a0iptables\u00a0restart\n
      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#7-restarting-cinder-services","title":"7. Restarting Cinder Services","text":"

      Configuration is complete; now you must restart the Cinder services to make the changes active.

      for\u00a0i\u00a0in\u00a0api\u00a0scheduler\u00a0volume;\u00a0do\u00a0sudo\u00a0service\u00a0openstack-cinder-${i}\u00a0start;\u00a0done\n

      Check the Cinder volume log to make sure that there are no errors:

      sudo\u00a0tail\u00a0-50\u00a0/var/log/cinder/volume.log\n
      "},{"location":"Administrator-Guide/GlusterFS-Cinder/#8-verify-glusterfs-integration-with-cinder","title":"8. Verify GlusterFS Integration with Cinder","text":"

      To verify that the installation and configuration are successful, create a Cinder volume and then check it using GlusterFS.

      Create a Cinder volume:

      cinder\u00a0create\u00a0--display_name\u00a0myvol\u00a010\n

      Volume creation takes a few seconds. Once created, run the following command:

      cinder\u00a0list\n

      The volume should be in \"available\" status. Now, look for a new file in the GlusterFS volume directory:

      sudo\u00a0ls\u00a0-lah\u00a0/var/lib/cinder/volumes/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/\n

      (the XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX will be a number specific to your installation)

      A newly created file should be inside that directory; this is the new volume you just created. A new file will appear each time you create a volume.

      For example:

      $ sudo\u00a0ls\u00a0-lah\u00a0/var/lib/cinder/volumes/29e55f0f3d56494ef1b1073ab927d425/\n\u00a0\n total\u00a04.0K\n\u00a0drwxr-xr-x.\u00a03\u00a0root\u00a0\u00a0\u00a0root\u00a0\u00a0\u00a0\u00a0\u00a073\u00a0Apr\u00a0\u00a04\u00a015:46\u00a0.\n\u00a0drwxr-xr-x.\u00a03\u00a0cinder\u00a0cinder\u00a04.0K\u00a0Apr\u00a0\u00a03\u00a009:31\u00a0..\n\u00a0-rw-rw-rw-.\u00a01\u00a0root\u00a0\u00a0\u00a0root\u00a0\u00a0\u00a0\u00a010G\u00a0Apr\u00a0\u00a04\u00a015:46\u00a0volume-a4b97d2e-0f8e-45b2-9b94-b8fa36bd51b9\n
      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/","title":"Coreutils for GlusterFS volumes","text":"

      The GlusterFS Coreutils is a suite of utilities that aims to mimic the standard Linux coreutils, with the exception that it utilizes the gluster C API in order to do work. It offers an interface similar to that of the ftp program. Operations include things like getting files from the server to the local machine, putting files from the local machine to the server, retrieving directory information from the server and so on.

      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#installation","title":"Installation","text":""},{"location":"Administrator-Guide/GlusterFS-Coreutils/#install-glusterfs","title":"Install GlusterFS","text":"

      For information on prerequisites, instructions and configuration of GlusterFS, see Installation Guides from http://docs.gluster.org/en/latest/.

      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#install-glusterfs-coreutils","title":"Install glusterfs-coreutils","text":"

      For now glusterfs-coreutils is packaged only as an RPM. Other package formats will be supported very soon.

      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#for-fedora","title":"For fedora","text":"

      Use dnf/yum to install glusterfs-coreutils:

      dnf install glusterfs-coreutils\n

      OR

      yum install glusterfs-coreutils\n
      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#usage","title":"Usage","text":"

      glusterfs-coreutils provides a set of basic utilities such as cat, cp, flock, ls, mkdir, rm, stat and tail that are implemented specifically using the GlusterFS API commonly known as libgfapi. These utilities can be used either inside a gluster remote shell or as standalone commands with 'gf' prepended to their respective base names. For example, the glusterfs cat utility is named gfcat, and so on, with the exception of the flock core utility, for which a standalone gfflock command is not provided (see the Notes section on why flock is designed that way).

      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#using-coreutils-within-a-remote-gluster-shell","title":"Using coreutils within a remote gluster-shell","text":""},{"location":"Administrator-Guide/GlusterFS-Coreutils/#invoke-a-new-shell","title":"Invoke a new shell","text":"

      In order to enter into a gluster client-shell, type gfcli and press enter. You will now be presented with a similar prompt as shown below:

      # gfcli\ngfcli>\n

      See the man page for gfcli for more options.

      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#connect-to-a-gluster-volume","title":"Connect to a gluster volume","text":"

      Now we need to connect as a client to a glusterfs volume which has already been started. Use the connect command to do so as follows:

      gfcli> connect glfs://<SERVER-IP or HOSTNAME>/<VOLNAME>\n

      For example if you have a volume named vol on a server with hostname localhost the above command will take the following form:

      gfcli> connect glfs://localhost/vol\n

      Make sure that you are successfully attached to a remote gluster volume by verifying the new prompt which should look like:

      gfcli (<SERVER IP or HOSTNAME/<VOLNAME>)\n
      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#try-out-your-favorite-utilities","title":"Try out your favorite utilities","text":"

      Please go through the man pages for different utilities and available options for each command. For example, man gfcp will display details on the usage of cp command outside or within a gluster-shell. Run different commands as follows:

      gfcli (localhost/vol) ls .\ngfcli (localhost/vol) stat .trashcan\n
      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#terminate-the-client-connection-from-the-volume","title":"Terminate the client connection from the volume","text":"

      Use disconnect command to close the connection:

      gfcli (localhost/vol) disconnect\ngfcli>\n
      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#exit-from-shell","title":"Exit from shell","text":"

      Run quit from shell:

      gfcli> quit\n
      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#using-standalone-glusterfs-coreutil-commands","title":"Using standalone glusterfs coreutil commands","text":"

      As mentioned above, glusterfs coreutils also provides standalone commands to perform the basic GNU coreutil functionalities. All these commands are prefixed with 'gf'. Instead of invoking a gluster client-shell, you can use these directly to establish the connection and perform the operation in one shot. For example, see the following sample usage of the gfstat command:

      gfstat glfs://localhost/vol/foo\n

      There is an exception regarding the flock core utility, which is not available as a standalone command for a reason described under the 'Notes' section.

      For more information on each command and corresponding options see associated man pages.

      "},{"location":"Administrator-Guide/GlusterFS-Coreutils/#notes","title":"Notes","text":"
      • Within a particular session of the gluster client-shell, the history of commands is preserved, i.e. you can use the up/down arrow keys to search through previously executed commands, or use the reverse history search with Ctrl+R.
      • flock is not available as a standalone 'gfflock' because locks are always associated with file descriptors. Unlike all other commands, flock cannot clean up the file descriptor straight away after acquiring the lock; for flock we need to maintain an active connection as a glusterfs client.
      "},{"location":"Administrator-Guide/GlusterFS-Filter/","title":"Modifying .vol files with a filter","text":"

      If you need to make manual changes to a .vol file it is recommended to make these through the client interface ('gluster foo'). Making changes directly to .vol files is discouraged, because it cannot be predicted when a .vol file will be reset on disk, for example with a 'gluster set foo' command. The command line interface was never designed to read the .vol files, but rather to keep state and rebuild them (from /var/lib/glusterd/vols/$vol/info). There is, however, another way to do this.

      You can create a shell script in the directory /usr/lib*/glusterfs/$VERSION/filter. All scripts located there will be executed every time the .vol files are written back to disk. The first and only argument passed to each script located there is the name of the .vol file.

      So you could create a script there that looks like this:

      #!/bin/sh\nsed\u00a0-i\u00a0'some-sed-magic'\u00a0\"$1\"\n

      This will run the script, which in turn will run the sed command on the .vol file (passed as $1).

      Importantly, the script needs to be set as executable (e.g. via chmod), else it won't be run.
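      For example (the script name and exact version directory are placeholders for your installation):

      chmod +x /usr/lib64/glusterfs/<VERSION>/filter/my-filter.sh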

      "},{"location":"Administrator-Guide/GlusterFS-Introduction/","title":"What is Gluster ?","text":"

      Gluster is a scalable, distributed file system that aggregates disk storage resources from multiple servers into a single global namespace.

      "},{"location":"Administrator-Guide/GlusterFS-Introduction/#advantages","title":"Advantages","text":"
      • Scales to several petabytes
      • Handles thousands of clients
      • POSIX compatible
      • Uses commodity hardware
      • Can use any ondisk filesystem that supports extended attributes
      • Accessible using industry standard protocols like NFS and SMB
      • Provides replication, quotas, geo-replication, snapshots and bitrot detection
      • Allows optimization for different workloads
      • Open Source

      Enterprises can scale capacity, performance, and availability on demand, with no vendor lock-in, across on-premise, public cloud, and hybrid environments. Gluster is used in production at thousands of organisations spanning media, healthcare, government, education, web 2.0, and financial services.

      "},{"location":"Administrator-Guide/GlusterFS-Introduction/#commercial-offerings-and-support","title":"Commercial offerings and support","text":"

      Several companies offer support or consulting.

      Red Hat Gluster Storage is a commercial storage software product, based on Gluster.

      "},{"location":"Administrator-Guide/GlusterFS-Keystone-Quickstart/","title":"GlusterFS Keystone Quickstart","text":"

      This is a document in progress, and may contain some errors or missing information.

      I am currently in the process of building an AWS image with this installed; however, if you can't wait and want to install this with a script, here are the commands from both articles, with defaults appropriate for an Amazon CentOS/RHEL 6 AMI, such as ami-a6e15bcf.

      This document assumes you already have GlusterFS with UFO installed, 3.3.1-11 or later, and are using the instructions here:

      http://www.gluster.org/2012/09/howto-using-ufo-swift-a-quick-and-dirty-setup-guide/

      These docs are largely derived from:

      http://fedoraproject.org/wiki/Getting_started_with_OpenStack_on_Fedora_17#Initial_Keystone_setup

      Add the RDO Openstack Grizzly and Epel repos:

      sudo\u00a0yum\u00a0install\u00a0-y\u00a0\"http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm\"\n\nsudo\u00a0yum\u00a0install\u00a0-y\u00a0\"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-1.noarch.rpm\"\n

      Install Openstack-Keystone

      sudo\u00a0yum\u00a0install\u00a0openstack-keystone\u00a0openstack-utils\u00a0python-keystoneclient\n

      Configure keystone

      $\u00a0cat\u00a0>\u00a0keystonerc\u00a0<<\u00a0_EOF\nexport\u00a0ADMIN_TOKEN=$(openssl\u00a0rand\u00a0-hex\u00a010)\nexport\u00a0OS_USERNAME=admin\nexport\u00a0OS_PASSWORD=$(openssl\u00a0rand\u00a0-hex\u00a010)\nexport\u00a0OS_TENANT_NAME=admin\nexport\u00a0OS_AUTH_URL=`[`https://127.0.0.1:5000/v2.0/`](https://127.0.0.1:5000/v2.0/)\nexport\u00a0SERVICE_ENDPOINT=`[`https://127.0.0.1:35357/v2.0/`](https://127.0.0.1:35357/v2.0/)\nexport\u00a0SERVICE_TOKEN=\\$ADMIN_TOKEN\n_EOF\n\n$\u00a0.\u00a0./keystonerc\n$\u00a0sudo\u00a0openstack-db\u00a0--service\u00a0keystone\u00a0--init\n

      Append the keystone configs to /etc/swift/proxy-server.conf

      $\u00a0sudo\u00a0-i\n\n#\u00a0cat\u00a0>>\u00a0/etc/swift/proxy-server.conf\u00a0<<\u00a0_EOM\n[filter:keystone]`\nuse\u00a0=\u00a0egg:swift#keystoneauth`\noperator_roles\u00a0=\u00a0admin,\u00a0swiftoperator`\n\n[filter:authtoken]\npaste.filter_factory\u00a0=\u00a0keystoneclient.middleware.auth_token:filter_factory\nauth_port\u00a0=\u00a035357\nauth_host\u00a0=\u00a0127.0.0.1\nauth_protocol\u00a0=\u00a0https\n_EOM\n\n# exit\n

      Finish configuring both swift and keystone using the command-line tool:

      sudo\u00a0openstack-config\u00a0--set\u00a0/etc/swift/proxy-server.conf\u00a0filter:authtoken\u00a0admin_token\u00a0$ADMIN_TOKEN\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/swift/proxy-server.conf\u00a0filter:authtoken\u00a0auth_token\u00a0$ADMIN_TOKEN\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/swift/proxy-server.conf\u00a0DEFAULT\u00a0log_name\u00a0proxy_server\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/swift/proxy-server.conf\u00a0filter:authtoken\u00a0signing_dir\u00a0/etc/swift\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/swift/proxy-server.conf\u00a0pipeline:main\u00a0pipeline\u00a0\"healthcheck\u00a0cache\u00a0authtoken\u00a0keystone\u00a0proxy-server\"\n\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/keystone/keystone.conf\u00a0DEFAULT\u00a0admin_token\u00a0$ADMIN_TOKEN\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/keystone/keystone.conf\u00a0ssl\u00a0enable\u00a0True\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/keystone/keystone.conf\u00a0ssl\u00a0keyfile\u00a0/etc/swift/cert.key\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/keystone/keystone.conf\u00a0ssl\u00a0certfile\u00a0/etc/swift/cert.crt\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/keystone/keystone.conf\u00a0signing\u00a0token_format\u00a0UUID\nsudo\u00a0openstack-config\u00a0--set\u00a0/etc/keystone/keystone.conf\u00a0sql\u00a0connection\u00a0mysql://keystone:keystone@127.0.0.1/keystone\n

      Configure keystone to start at boot and start it up.

      sudo\u00a0chkconfig\u00a0openstack-keystone\u00a0on\nsudo\u00a0service\u00a0openstack-keystone\u00a0start\u00a0#\u00a0If\u00a0you\u00a0script\u00a0this,\u00a0you'll\u00a0want\u00a0to\u00a0wait\u00a0a\u00a0few\u00a0seconds\u00a0to\u00a0start\u00a0using\u00a0it\n

      We are using untrusted certs, so tell keystone not to complain. If you replace with trusted certs, or are not using SSL, set this to \"\".

      INSECURE=\"--insecure\"\n

      Create the keystone and swift services in keystone:

      KS_SERVICEID=$(keystone\u00a0$INSECURE\u00a0service-create\u00a0--name=keystone\u00a0--type=identity\u00a0--description=\"Keystone\u00a0Identity\u00a0Service\"\u00a0|\u00a0grep\u00a0\"\u00a0id\u00a0\"\u00a0|\u00a0cut\u00a0-d\u00a0\"|\"\u00a0-f\u00a03)\n\nSW_SERVICEID=$(keystone\u00a0$INSECURE\u00a0service-create\u00a0--name=swift\u00a0--type=object-store\u00a0--description=\"Swift\u00a0Service\"\u00a0|\u00a0grep\u00a0\"\u00a0id\u00a0\"\u00a0|\u00a0cut\u00a0-d\u00a0\"|\"\u00a0-f\u00a03)\n\nendpoint=\"`[`https://127.0.0.1:443`](https://127.0.0.1:443)`\"\n\nkeystone\u00a0$INSECURE\u00a0endpoint-create\u00a0--service_id\u00a0$KS_SERVICEID\u00a0\\\n\u00a0\u00a0--publicurl\u00a0$endpoint'/v2.0'\u00a0--adminurl\u00a0`[`https://127.0.0.1:35357/v2.0`](https://127.0.0.1:35357/v2.0)`\u00a0\\\n\u00a0\u00a0--internalurl\u00a0`[`https://127.0.0.1:5000/v2.0`](https://127.0.0.1:5000/v2.0)\n\nkeystone\u00a0$INSECURE\u00a0endpoint-create\u00a0--service_id\u00a0$SW_SERVICEID\u00a0\\\n\u00a0\u00a0--publicurl\u00a0$endpoint'/v1/AUTH_$(tenant_id)s'\u00a0\\\n\u00a0\u00a0--adminurl\u00a0$endpoint'/v1/AUTH_$(tenant_id)s'\u00a0\\\n\u00a0\u00a0--internalurl\u00a0$endpoint'/v1/AUTH_$(tenant_id)s'\n

      Create the admin tenant:

      admin_id=$(keystone\u00a0$INSECURE\u00a0tenant-create\u00a0--name\u00a0admin\u00a0--description\u00a0\"Internal\u00a0Admin\u00a0Tenant\"\u00a0|\u00a0grep\u00a0id\u00a0|\u00a0awk\u00a0'{print\u00a0$4}')\n

      Create the admin roles:

      admin_role=$(keystone\u00a0$INSECURE\u00a0role-create\u00a0--name\u00a0admin\u00a0|\u00a0grep\u00a0id\u00a0|\u00a0awk\u00a0'{print\u00a0$4}')\nksadmin_role=$(keystone\u00a0$INSECURE\u00a0role-create\u00a0--name\u00a0KeystoneServiceAdmin\u00a0|\u00a0grep\u00a0id\u00a0|\u00a0awk\u00a0'{print\u00a0$4}')\nkadmin_role=$(keystone\u00a0$INSECURE\u00a0role-create\u00a0--name\u00a0KeystoneAdmin\u00a0|\u00a0grep\u00a0id\u00a0|\u00a0awk\u00a0'{print\u00a0$4}')\nmember_role=$(keystone\u00a0$INSECURE\u00a0role-create\u00a0--name\u00a0member\u00a0|\u00a0grep\u00a0id\u00a0|\u00a0awk\u00a0'{print\u00a0$4}')\n

      Create the admin user:

      user_id=$(keystone\u00a0$INSECURE\u00a0user-create\u00a0--name\u00a0admin\u00a0--tenant-id\u00a0$admin_id\u00a0--pass\u00a0$OS_PASSWORD\u00a0|\u00a0grep\u00a0id\u00a0|\u00a0awk\u00a0'{print\u00a0$4}')\n\nkeystone\u00a0$INSECURE\u00a0user-role-add\u00a0--user-id\u00a0$user_id\u00a0--tenant-id\u00a0$admin_id\u00a0\\\n\u00a0\u00a0--role-id\u00a0$admin_role\n\nkeystone\u00a0$INSECURE\u00a0user-role-add\u00a0--user-id\u00a0$user_id\u00a0--tenant-id\u00a0$admin_id\u00a0\\\n\u00a0\u00a0--role-id\u00a0$kadmin_role\n\nkeystone\u00a0$INSECURE\u00a0user-role-add\u00a0--user-id\u00a0$user_id\u00a0--tenant-id\u00a0$admin_id\u00a0\\\n\u00a0\u00a0--role-id\u00a0$ksadmin_role\n

      If you do not have multi-volume support (broken in 3.3.1-11), then the volume names will not correlate to the tenants, and all tenants will map to the same volume, so just use a normal name. (This will be fixed in 3.4, and should be fixed in 3.4 Beta. The bug report for this is here: https://bugzilla.redhat.com/show_bug.cgi?id=924792)

      volname=\"admin\"\n\n# or\u00a0if\u00a0you\u00a0have\u00a0the\u00a0multi-volume\u00a0patch\nvolname=$admin_id\n

      Create and start the admin volume:

      sudo\u00a0gluster\u00a0volume\u00a0create\u00a0$volname\u00a0$myhostname:$pathtobrick\nsudo\u00a0gluster\u00a0volume\u00a0start\u00a0$volname\nsudo\u00a0service\u00a0openstack-keystone\u00a0start\n

      Create the ring for the admin tenant. If you have working multi-volume support, then you can specify multiple volume names in the call:

      cd\u00a0/etc/swift\nsudo\u00a0/usr/bin/gluster-swift-gen-builders\u00a0$volname\nsudo\u00a0swift-init\u00a0main\u00a0restart\n

      Create a testadmin user associated with the admin tenant with password testadmin and admin role:

      user_id=$(keystone\u00a0$INSECURE\u00a0user-create\u00a0--name\u00a0testadmin\u00a0--tenant-id\u00a0$admin_id\u00a0--pass\u00a0testadmin\u00a0|\u00a0grep\u00a0id\u00a0|\u00a0awk\u00a0'{print\u00a0$4}')\n\nkeystone\u00a0$INSECURE\u00a0user-role-add\u00a0--user-id\u00a0$user_id\u00a0--tenant-id\u00a0$admin_id\u00a0\\\n\u00a0\u00a0--role-id\u00a0$admin_role\n

      Test the user:

      curl\u00a0$INSECURE\u00a0-d\u00a0'{\"auth\":{\"tenantName\":\u00a0\"admin\",\u00a0\"passwordCredentials\":{\"username\":\u00a0\"testadmin\",\u00a0\"password\":\u00a0\"testadmin\"}}}'\u00a0-H\u00a0\"Content-type:\u00a0application/json\"\u00a0\"https://127.0.0.1:5000/v2.0/tokens\"\n

      See here for more examples:

      http://docs.openstack.org/developer/keystone/api_curl_examples.html

      "},{"location":"Administrator-Guide/GlusterFS-iSCSI/","title":"GlusterFS iSCSI","text":""},{"location":"Administrator-Guide/GlusterFS-iSCSI/#introduction","title":"Introduction","text":"

      iSCSI on Gluster can be set up using the Linux target driver. This is a user-space daemon that accepts iSCSI (as well as iSER and FCoE). It interprets iSCSI CDBs and converts them into some other I/O operation, according to user configuration. In our case, we can convert the CDBs into file operations that run against a gluster file. The file represents the LUN, and the offset in the file represents the LBA.

      A plug-in for the Linux target driver has been written to use the libgfapi. It is part of the Linux target driver (bs_glfs.c). Using it, the datapath skips FUSE. This document will be updated to describe how to use it. You can see README.glfs in the Linux target driver's documentation subdirectory.

      LIO is a replacement for the Linux Target Driver that is included in RHEL7. A user-space plug-in mechanism for it is under development. Once that piece of code exists a similar mechanism can be built for gluster as was done for the Linux target driver.

      Below is a cookbook to set it up using the Linux target driver on the server. This has been tested on XEN and KVM instances within RHEL6, RHEL7, and Fedora 19 instances. In this setup a single path leads to gluster, which represents a performance bottleneck and a single point of failure. For HA and load balancing, it is possible to set up two or more paths to different gluster servers using mpio; if the target name is equivalent over each path, mpio will coalesce both paths into a single device.

      For more information on iSCSI and the Linux target driver, see [1] and [2].

      "},{"location":"Administrator-Guide/GlusterFS-iSCSI/#setup","title":"Setup","text":"

      Mount gluster locally on your gluster server. Note you can also run it on the gluster client. There are pros and cons to these configurations, described below.

      mount\u00a0-t\u00a0glusterfs\u00a0127.0.0.1:gserver\u00a0/mnt\n

      Create a large file representing your block device within the gluster fs. In this case, the lun is 2G. (You could also create a gluster \"block device\" for this purpose, which would skip the file system).

      dd\u00a0if=/dev/zero\u00a0of=disk3\u00a0bs=2G\u00a0count=1\n

      Create a target using the file as the backend storage.

      If necessary, download the Linux SCSI target. Then start the service.

      yum\u00a0install\u00a0scsi-target-utils\nservice\u00a0tgtd\u00a0start\n

      You must give an iSCSI Qualified Name (IQN), in the format: iqn.yyyy-mm.reversed.domain.name:OptionalIdentifierText

      where:

      yyyy-mm represents the 4-digit year and 2-digit month the device was started (for example: 2011-07)

      tgtadm\u00a0--lld\u00a0iscsi\u00a0--op\u00a0new\u00a0--mode\u00a0target\u00a0--tid\u00a01\u00a0-T\u00a0iqn.2013-10.com.redhat\n

      You can look at the target:

      #\u00a0tgtadm\u00a0--lld\u00a0iscsi\u00a0--op\u00a0show\u00a0--mode\u00a0conn\u00a0--tid\u00a01\n\nSession:\u00a011\u00a0\u00a0Connection:\u00a00\u00a0\u00a0\u00a0\u00a0\u00a0Initiator\u00a0iqn.1994-05.com.redhat:cf75c8d4274d\n

      Next, add a logical unit to the target

      tgtadm\u00a0--lld\u00a0iscsi\u00a0--op\u00a0new\u00a0--mode\u00a0logicalunit\u00a0--tid\u00a01\u00a0--lun\u00a01\u00a0-b\u00a0/mnt/disk3\n

      Allow any initiator to access the target.

      tgtadm\u00a0--lld\u00a0iscsi\u00a0--op\u00a0bind\u00a0--mode\u00a0target\u00a0--tid\u00a01\u00a0-I\u00a0ALL\n

      Now it's time to set up your client.

      Discover your targets. Note in this example's case, the target IP address is 192.168.1.2

      iscsiadm\u00a0--mode\u00a0discovery\u00a0--type\u00a0sendtargets\u00a0--portal\u00a0192.168.1.2\n

      Login to your target session.

      iscsiadm\u00a0--mode\u00a0node\u00a0--targetname\u00a0iqn.2001-04.com.example:storage.disk1.amiens.sys1.xyz\u00a0--portal\u00a0192.168.1.2:3260\u00a0--login\n

      You should have a new SCSI disk. You will see it created in /var/log/messages. You will see it in lsblk.

      You can send I/O to it:

      dd\u00a0if=/dev/zero\u00a0of=/dev/sda\u00a0bs=4K\u00a0count=100\n

      To tear down your iSCSI connection:

      iscsiadm\u00a0\u00a0-m\u00a0node\u00a0-T\u00a0iqn.2001-04.com.redhat\u00a0\u00a0-p\u00a0172.17.40.21\u00a0-u\n
      "},{"location":"Administrator-Guide/GlusterFS-iSCSI/#running-the-iscsi-target-on-the-gluster-client","title":"Running the iSCSI target on the gluster client","text":"

      You can run the Linux target daemon on the gluster client. The advantage of this setup is that the client could run gluster and enjoy all of gluster's benefits. For example, gluster could \"fan out\" I/O to different gluster servers. The downside would be that the client would need to load and configure gluster. It is better to run gluster on the client if it is possible.

      "},{"location":"Administrator-Guide/GlusterFS-iSCSI/#references","title":"References","text":"

      [1] http://www.linuxjournal.com/content/creating-software-backed-iscsi-targets-red-hat-enterprise-linux-6

      [2] http://www.cyberciti.biz/tips/howto-setup-linux-iscsi-target-sanwith-tgt.html

      "},{"location":"Administrator-Guide/Handling-of-users-with-many-groups/","title":"Handling of users that belong to many groups","text":"

      Users can belong to many different (UNIX) groups. These groups are generally used to allow or deny permissions for executing commands or access to files and directories.

      The number of groups a user can belong to depends on the operating system, but there are also components that support fewer groups. In Gluster, there are different restrictions on different levels in the stack. The explanations in this document should clarify which restrictions exist, and how these can be handled.

      "},{"location":"Administrator-Guide/Handling-of-users-with-many-groups/#tldr","title":"tl;dr","text":"
      • If users belong to more than 90 groups, the brick processes need to resolve the secondary/auxiliary groups with the server.manage-gids volume option.
      • The Linux kernel's /proc filesystem provides at most 32 groups of a running process; if this is not sufficient, the resolve-gids mount option can be used.
      • Gluster/NFS needs nfs.server-aux-gids when users accessing a Gluster volume over NFS belong to more than 16 groups.

      All of the above options require that the system doing the group resolving is configured (nsswitch, sssd, ..) so that it can obtain all groups when only a UID is known.

      "},{"location":"Administrator-Guide/Handling-of-users-with-many-groups/#limit-in-the-glusterfs-protocol","title":"Limit in the GlusterFS protocol","text":"

      When a Gluster client performs an action on a Gluster volume, the operation is sent in an RPC packet. This RPC packet contains a header with the credentials of the user. The server side receives the RPC packet and uses the credentials from the RPC header to perform ownership operations and allow/deny checks.

      The RPC header used by the GlusterFS protocols can contain at most ~93 groups. To get past this limit, the server process (brick) receiving the RPC procedure can resolve the groups locally and ignore the (too few) groups from the RPC header.

      This requires that the server process can resolve all of the user's groups from the UID of the client process. Most environments with many groups already use a configuration where users and groups are maintained in a central location; for enterprises it is common to manage users and their groups in LDAP, Active Directory, NIS or similar.

      To have the groups of a user resolved on the server-side (brick), the volume option server.manage-gids needs to be set. Once this option is enabled, the brick processes will not use the groups that the Gluster clients send, but will use the POSIX getgrouplist() function to fetch them.
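
      As a minimal sketch of enabling this (the volume name myvol is only an example), the option is set through the regular volume set interface:

        gluster volume set myvol server.manage-gids on
        gluster volume get myvol server.manage-gids   # verify the current value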

      Because this is a protocol limitation, all clients, including FUSE mounts, Gluster/NFS server and libgfapi applications are affected by this.

      "},{"location":"Administrator-Guide/Handling-of-users-with-many-groups/#group-limit-with-fuse","title":"Group limit with FUSE","text":"

      The FUSE client gets the groups of the process that does the I/O by reading the information from /proc/$pid/status. This file only contains up to 32 groups. If client-side xlators rely on all groups of a process/user (like posix-acl), these 32 groups could limit functionality.

      For that reason a mount option has been added. With the resolve-gids mount option, the FUSE client calls the POSIX getgrouplist() function instead of reading /proc/$pid/status.
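
      For illustration, the option is passed at mount time like any other FUSE mount option (server1, myvol and the mount point are placeholders):

        mount -t glusterfs -o resolve-gids server1:/myvol /mnt/myvol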

      "},{"location":"Administrator-Guide/Handling-of-users-with-many-groups/#group-limit-for-nfs","title":"Group limit for NFS","text":"

      The NFS protocol (actually the AUTH_SYS/AUTH_UNIX RPC header) allows up to 16 groups. These are the groups that the NFS-server receives from NFS-clients. Similar to the way the brick processes can resolve the groups on the server-side, the NFS-server can take the UID passed by the NFS-client and use that to resolve the groups. The volume option for this is nfs.server-aux-gids.
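
      A minimal example of enabling this on a volume (myvol is illustrative):

        gluster volume set myvol nfs.server-aux-gids on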

      Other NFS-servers offer options like this too. The Linux kernel nfsd server uses rpc.mountd --manage-gids. NFS-Ganesha has the configuration option Manage_Gids.

      "},{"location":"Administrator-Guide/Handling-of-users-with-many-groups/#implications-of-these-solutions","title":"Implications of these solutions","text":"

      All of the mentioned options are disabled by default. One of the reasons is that resolving groups is an expensive operation; in many cases there is no need for supporting many groups, and enabling the option could mean a performance hit.

      When groups are resolved, the list is cached, and the validity of the cache is configurable. The Gluster processes are not the only ones that cache these groups; nscd or sssd also keep a cache when they handle the getgrouplist() requests. When there are many requests and querying the groups from a centralized management system takes long, the caches might benefit from a longer validity.

      Another, less obvious difference might be noticed too. Many processes that are written with security in mind reduce the groups that the process can effectively use. This is normally done with the setegids() function. When storage processes do not honour the reduced set of effective groups, and instead use the UID to resolve all groups of a process, the groups that got dropped with setegids() are added back again. This could lead to permissions that the process should not have.

      "},{"location":"Administrator-Guide/Hook-scripts/","title":"Managing GlusterFS Volume Life-Cycle Extensions with Hook Scripts","text":"

      Glusterfs allows automation of operations by user-written scripts. For every operation, you can execute a pre and a post script.

      "},{"location":"Administrator-Guide/Hook-scripts/#pre-scripts","title":"Pre Scripts","text":"

      These scripts are run before the occurrence of the event. You can write a script to automate activities like managing system-wide services. For example, you can write a script to stop exporting the SMB share corresponding to the volume before you stop the volume.

      "},{"location":"Administrator-Guide/Hook-scripts/#post-scripts","title":"Post Scripts","text":"

      These scripts are run after execution of the event. For example, you can write a script to export the SMB share corresponding to the volume after you start the volume.

      You can run scripts for the following events:

      • Creating a volume
      • Starting a volume
      • Adding a brick
      • Removing a brick
      • Tuning volume options
      • Stopping a volume
      • Deleting a volume
      "},{"location":"Administrator-Guide/Hook-scripts/#naming-convention","title":"Naming Convention","text":"

      When naming your scripts, follow the file-name conventions of your underlying file system (such as XFS).

      Note: To enable a script, its name must start with an S. Scripts run in the lexicographic order of their names.

      "},{"location":"Administrator-Guide/Hook-scripts/#location-of-scripts","title":"Location of Scripts","text":"

      This section provides information on the folders where the scripts must be placed. When you create a trusted storage pool, the following directories are created:

      • /var/lib/glusterd/hooks/1/create/
      • /var/lib/glusterd/hooks/1/delete/
      • /var/lib/glusterd/hooks/1/start/
      • /var/lib/glusterd/hooks/1/stop/
      • /var/lib/glusterd/hooks/1/set/
      • /var/lib/glusterd/hooks/1/add-brick/
      • /var/lib/glusterd/hooks/1/remove-brick/

      After creating a script, you must save it in its respective folder on all the nodes of the trusted storage pool. The location of the script determines whether it is executed before or after an event. Scripts are provided with the command line argument --volname=VOLNAME to specify the volume. Command-specific additional arguments are provided for the following volume operations:

      Start volume\n    --first=yes, if the volume is the first to be started\n    --first=no, for otherwise\nStop volume\n    --last=yes, if the volume is to be stopped last.\n    --last=no, for otherwise\nSet volume\n    -o key=value\n    For every key, value is specified in volume set command.\n
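
      For illustration only, a minimal post-start hook could look like the sketch below. The file name S99log-start.sh and the use of logger are assumptions for the example; the script would be placed in /var/lib/glusterd/hooks/1/start/post/ on every node and made executable.

        #!/bin/bash
        # Hypothetical example hook: record which volume was started and whether it
        # was the first volume to be started. glusterd passes arguments in the
        # --key=value form described above.

        VOLNAME=""
        FIRST=""

        for arg in "$@"; do
            case "$arg" in
                --volname=*) VOLNAME="${arg#--volname=}" ;;
                --first=*)   FIRST="${arg#--first=}" ;;
            esac
        done

        # Log to syslog so the result is easy to find on every node.
        logger -t gluster-hook "volume ${VOLNAME} started (first=${FIRST})"
        exit 0
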
      "},{"location":"Administrator-Guide/Hook-scripts/#prepackaged-scripts","title":"Prepackaged Scripts","text":"

      Gluster provides scripts to export Samba (SMB) share when you start a volume and to remove the share when you stop the volume. These scripts are available at: /var/lib/glusterd/hooks/1/start/post and /var/lib/glusterd/hooks/1/stop/pre. By default, the scripts are enabled.

      When you start a volume using gluster volume start VOLNAME, the S30samba-start.sh script performs the following:

      • Adds Samba share configuration details of the volume to the smb.conf file
      • Mounts the volume through FUSE and adds an entry in /etc/fstab for the same.
      • Restarts Samba to run with updated configuration

      When you stop the volume using gluster volume stop VOLNAME, the S30samba-stop.sh script performs the following:

      • Removes the Samba share details of the volume from the smb.conf file
      • Unmounts the FUSE mount point and removes the corresponding entry in /etc/fstab
      • Restarts Samba to run with updated configuration
      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/","title":"Linux Kernel Tuning","text":""},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#linux-kernel-tuning-for-glusterfs","title":"Linux kernel tuning for GlusterFS","text":"

      Every now and then, questions come up, both internally and from the many enthusiasts, about what Gluster has to say about kernel tuning, if anything.

      The rarity of kernel tuning is on account of the Linux kernel doing a pretty good job on most workloads. But there is a flip side to this design: the Linux kernel has historically been eager to eat up a lot of RAM, provided there is some, and to drive towards caching as the primary way to improve performance.

      For most cases, this is fine, but as the amount of workload increases over time and clustered load is thrown upon the servers, this turns out to be troublesome, leading to catastrophic failures of jobs etc.

      Having had a fair bit of experience looking at large memory systems with heavily loaded regressions, be it CAD, EDA or similar tools, we have sometimes encountered stability problems with Gluster. We had to carefully analyse the memory footprint and the amount of disk wait times over days. This gave us a rather remarkable story of disk thrashing, huge iowaits, kernel oopses, disk hangs and so on.

      This article is the result of my many experiences with tuning options which were performed on many sites. The tuning not only helped with overall responsiveness, but it dramatically stabilized the cluster overall.

      When it comes to memory tuning the journey starts with the 'VM' subsystem which has a bizarre number of options, which can cause a lot of confusion.

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#vmswappiness","title":"vm.swappiness","text":"

      vm.swappiness is a tunable kernel parameter that controls how much the kernel favors swap over RAM. At the source code level, it's also defined as the tendency to steal mapped memory. A high swappiness value means that the kernel will be more apt to unmap mapped pages. A low swappiness value means the opposite: the kernel will be less apt to unmap mapped pages. In other words, the higher the vm.swappiness value, the more the system will swap.

      High system swapping has very undesirable effects when there are huge chunks of data being swapped in and out of RAM. Many have argued for the value to be set high, but in my experience, setting the value to '0' causes a performance increase.

      Conforming with the details here - http://lwn.net/Articles/100978/

      But again, these changes should be driven by testing and by due diligence from the user for their own applications. Heavily loaded streaming apps should set this value to '0'; changing it to '0' improves the system's responsiveness.
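
      For reference, a sketch of applying the value at runtime and persisting it (the value follows the text above; choose it only after testing on your own workload):

        sysctl vm.swappiness                          # show the current value
        sysctl -w vm.swappiness=0                     # apply at runtime
        echo "vm.swappiness = 0" >> /etc/sysctl.conf  # persist across reboots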

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#vmvfs_cache_pressure","title":"vm.vfs_cache_pressure","text":"

      This option controls the tendency of the kernel to reclaim the memory which is used for caching of directory and inode objects.

      At the default value of vfs_cache_pressure=100 the kernel will attempt to reclaim dentries and inodes at a \"fair\" rate with respect to pagecache and swapcache reclaim. Decreasing vfs_cache_pressure causes the kernel to prefer to retain dentry and inode caches. When vfs_cache_pressure=0, the kernel will never reclaim dentries and inodes due to memory pressure and this can easily lead to out-of-memory conditions. Increasing vfs_cache_pressure beyond 100 causes the kernel to prefer to reclaim dentries and inodes.

      With GlusterFS, many users with a lot of storage and many small files easily end up using a lot of RAM on the server side due to 'inode/dentry' caching, leading to decreased performance when the kernel keeps crawling through data-structures on a 40GB RAM system. Changing this value higher than 100 has helped many users to achieve fair caching and more responsiveness from the kernel.
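
      As an illustration (200 is only an example of a value higher than 100; tune it for your own memory profile):

        sysctl -w vm.vfs_cache_pressure=200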

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#vmdirty_background_ratio","title":"vm.dirty_background_ratio","text":""},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#vmdirty_ratio","title":"vm.dirty_ratio","text":"

      The first of the two (vm.dirty_background_ratio) defines the percentage of memory that can become dirty before a background flushing of the pages to disk starts. Until this percentage is reached no pages are flushed to disk. However when the flushing starts, then it's done in the background without disrupting any of the running processes in the foreground.

      Now the second of the two parameters (vm.dirty_ratio) defines the percentage of memory which can be occupied by dirty pages before a forced flush starts. If the percentage of dirty pages reaches this threshold, then all processes become synchronous, and they are not allowed to continue until the io operation they have requested is actually performed and the data is on disk. In cases of high performance I/O machines, this causes a problem as the data caching is cut away and all of the processes doing I/O become blocked to wait for I/O. This will cause a large number of hanging processes, which leads to high load, which leads to an unstable system and crappy performance.

      Lowering them from standard values causes everything to be flushed to disk rather than storing much in RAM. It helps large memory systems, which would normally flush a 45G-90G pagecache to disk, causing huge wait times for front-end applications, decreasing overall responsiveness and interactivity.
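
      A sketch of lowering both values at runtime (the numbers are illustrative; suitable values depend on RAM size and storage speed):

        sysctl -w vm.dirty_background_ratio=1
        sysctl -w vm.dirty_ratio=10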

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#1-procsysvmpagecache","title":"\"1\" > /proc/sys/vm/pagecache","text":"

      Page Cache is a disk cache which holds data of files and executable programs, i.e. pages with the actual contents of files or block devices. Page Cache (disk cache) is used to reduce the number of disk reads. A value of '1' restricts the page cache to 1% of RAM, so that most data is read from disk rather than served from RAM. This value is of questionable benefit after the above values have been changed. Changing this option is not necessary, but if you are still paranoid about controlling the pagecache, this value should help.

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#deadline-sysblocksdcqueuescheduler","title":"\"deadline\" > /sys/block/sdc/queue/scheduler","text":"

      The I/O scheduler is a component of the Linux kernel which decides how the read and write buffers are to be queued for the underlying device. Theoretically 'noop' is better with a smart RAID controller because Linux knows nothing about (physical) disk geometry, therefore it can be efficient to let the controller, well aware of disk geometry, handle the requests as soon as possible. But 'deadline' seems to enhance performance. You can read more about them in the Linux kernel source documentation: linux/Documentation/block/*iosched.txt . I have also seen 'read' throughput increase during mixed-operations (many writes).

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#256-sysblocksdcqueuenr_requests","title":"\"256\" > /sys/block/sdc/queue/nr_requests","text":"

      This is the size of the I/O request queue, i.e. how many requests are buffered before the scheduler sends them to the disk. The internal queue size of some controllers (queue_depth) is larger than the I/O scheduler's nr_requests, so the I/O scheduler does not get much of a chance to properly order and merge the requests. The deadline or CFQ scheduler likes to have nr_requests set to 2 times the value of queue_depth, which is the default for a given controller. Reordering and merging requests helps the scheduler to be more responsive under huge load.
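
      To follow the 2x rule described above, you would typically check the controller's queue depth first and then size nr_requests accordingly. A sketch (sdc is a placeholder device; the queue_depth file exists for SCSI-backed devices):

        cat /sys/block/sdc/device/queue_depth        # controller queue depth, e.g. 128
        echo 256 > /sys/block/sdc/queue/nr_requests  # roughly 2 x queue_depth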

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#echo-16-procsysvmpage-cluster","title":"echo \"16\" > /proc/sys/vm/page-cluster","text":"

      page-cluster controls the number of pages which are written to swap in a single attempt; it defines the swap I/O size. In the above example '16' is chosen to match a RAID stripe size of 64k. This wouldn't make sense after you have set swappiness=0, but if you defined swappiness=10 or 20, then using this value helps when you have a RAID stripe size of 64k.

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#blockdev-setra-4096-dev-eg-sdb-hdc-or-dev_mapper","title":"blockdev --setra 4096 /dev/ (eg:- sdb, hdc or dev_mapper)

      Default block device settings often result in terrible performance for many RAID controllers. Adding the above option, which sets read-ahead to 4096 * 512-byte sectors, at least for the streamed copy, increases the speed, saturating the HD's integrated cache by reading ahead during the period used by the kernel to prepare I/O. It may put in cached data which will be requested by the next read. Too much read-ahead may kill random I/O on huge files if it uses potentially useful drive time or loads data beyond caches.
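
      For example (sdb is a placeholder device name):

        blockdev --getra /dev/sdb        # show current read-ahead in 512-byte sectors
        blockdev --setra 4096 /dev/sdb   # set read-ahead to 4096 sectors (2 MB)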

      A few other miscellaneous changes which are recommended at filesystem level but haven't been tested yet are the following. Make sure that your filesystem knows about the stripe size and number of disks in the array. E.g. for a raid5 array with a stripe size of 64K and 6 disks (effectively 5, because in every stripe-set there is one disk doing parity). These are built on theoretical assumptions and gathered from various other blogs/articles provided by RAID experts.

      -> ext4 fs, 5 disks, 64K stripe, units in 4K blocks

      mkfs -t ext4 -E stride=$((64/4))

      -> xfs, 5 disks, 64K stripe, units in 512-byte sectors

      mkfs -t xfs -d sunit=$((64*2)) -d swidth=$((5*64*2))

      You may want to consider increasing the above stripe sizes for streaming large files.
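
      Putting the arithmetic together for the same 5-data-disk, 64K-stripe example: ext4's stride is stripe/block = 64K/4K = 16, and its stripe width is stride x data disks = 80. The commands below are a sketch only, assuming mke2fs's stride/stripe-width extended options and mkfs.xfs's su/sw shorthand; /dev/sdX is a placeholder.

        mkfs -t ext4 -E stride=16,stripe-width=80 /dev/sdX   # 64K stripe, 4K blocks, 5 data disks
        mkfs -t xfs -d su=64k,sw=5 /dev/sdX                  # equivalent xfs stripe geometry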

      WARNING: The above changes are highly subjective and depend on the type of application. This article does not guarantee any benefits whatsoever without prior due diligence from the user for their respective applications. They should only be applied at the behest of an expected increase in overall system responsiveness or if they resolve ongoing issues.

      More informative and interesting articles/emails/blogs to read

      • http://dom.as/2008/02/05/linux-io-schedulers/
      • http://www.nextre.it/oracledocs/oraclemyths.html
      • https://lkml.org/lkml/2006/11/15/40
      • http://misterd77.blogspot.com/2007/11/3ware-hardware-raid-vs-linux-software.html

      Last updated by:User:y4m4

      ","text":""},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#commentjdarcy","title":"comment:jdarcy

      Some additional tuning ideas:

      * The choice of scheduler is *really* hardware- and workload-dependent, and some schedulers have unique features other than performance. For example, last time I looked cgroups support was limited to the cfq scheduler. Different tests regularly do best on any of cfq, deadline, or noop. The best advice here is not to use a particular scheduler but to try them all for a specific need.

      * It's worth checking to make sure that /sys/.../max_sectors_kb matches max_hw_sectors_kb. I haven't seen this problem for a while, but back when I used to work on Lustre I often saw that these didn't match and performance suffered.

      * For read-heavy workloads, experimenting with /sys/.../readahead_kb is definitely worthwhile.

      * Filesystems should be built with -I 512 or similar so that more xattrs can be stored in the inode instead of requiring an extra seek.

      * Mounting with noatime or relatime is usually good for performance.

      ","text":""},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#replyy4m4","title":"reply:y4m4","text":"

      Agreed, I was about to write those parameters you mentioned. I should write another elaborate article on FS changes.

      y4m4

      "},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#commenteco","title":"comment:eco

      1 year ago
      This article is the model on which all articles should be written. Detailed information, solid examples and a great selection of references to let readers go more in depth on topics they choose. A great benchmark for others to strive to attain.
      Eco

      ","text":""},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#commenty4m4","title":"comment:y4m4

      sysctl -w net.core.{r,w}mem_max = 4096000 - this helped us to reach 800MB/sec with replicated GlusterFS on 10gige - thanks to Ben England for these test results.
      y4m4

      ","text":""},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#commentbengland","title":"comment:bengland

      After testing Gluster 3.2.4 performance with RHEL6.1, I'd suggest some changes to this article's recommendations:

      vm.swappiness=10 not 0 -- I think 0 is a bit extreme and might lead to out-of-memory conditions, but 10 will avoid just about all paging/swapping. If you still see swapping, you need to probably focus on restricting dirty pages with vm.dirty_ratio.

      vfs_cache_pressure > 100 -- why? I thought this was a percentage.

      vm.pagecache=1 -- some distros (e.g. RHEL6) don't have vm.pagecache parameter.

      vm.dirty_background_ratio=1 not 10 (kernel default?) -- the kernel default is a bit dependent on choice of Linux distro, but for most workloads it's better to set this parameter very low to cause Linux to push dirty pages out to storage sooner. It means that if dirty pages exceed 1% of RAM then it will start to asynchronously write dirty pages to storage. The only workload where this is really bad: apps that write temp files and then quickly delete them (compiles) -- and you should probably be using local storage for such files anyway.

      Choice of vm.dirty_ratio is more dependent upon the workload, but in other contexts I have observed that response time fairness and stability is much better if you lower dirty ratio so that it doesn't take more than 2-5 seconds to flush all dirty pages to storage.

      block device parameters:

      I'm not aware of any case where cfq scheduler actually helps Gluster server. Unless server I/O threads correspond directly to end-users, I don't see how cfq can help you. Deadline scheduler is a good choice. I/O request queue has to be deep enough to allow scheduler to reorder requests to optimize away disk seeks. The parameters max_sectors_kb and nr_requests are relevant for this. For read-ahead, consider increasing it to the point where you prefetch for longer period of time than a disk seek (on order of 10 msec), so that you can avoid unnecessary disk seeks for multi-stream workloads. This comes at the expense of I/O latency so don't overdo it.

      network:

      jumbo frames can increase throughput significantly for 10-GbE networks.

      Raise net.core.{r,w}mem_max to 540000 from the default of 131071 (not the 4 MB above, my previous recommendation). Gluster 3.2 does a setsockopt() call to use 1/2 MB of memory for the TCP socket buffer space.
      bengland
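
      Based on the figures in this comment, a runtime sketch would be (the values are the ones mentioned above, not a general recommendation):

        sysctl -w net.core.rmem_max=540000
        sysctl -w net.core.wmem_max=540000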

      ","text":""},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#commenthjmangalam","title":"comment:hjmangalam

      Thanks very much for noting this info - the descriptions are VERY good. I'm in the midst of debugging a misbehaving gluster that can't seem to handle small writes over IPoIB, and this contains some useful pointers.

      Some suggestions that might make this more immediately useful:

      - I'm assuming that this discussion refers to the gluster server nodes, not to the gluster native client nodes, yes? If that's the case, are there also kernel parameters or recommended settings for the client nodes?
      - While there are some cases where you mention that a value should be changed to a particular # or %, in a number of cases you advise just increasing/decreasing the values, which for something like a kernel parameter is probably not a useful suggestion. Do I raise it by 10? 10%? 2x? 10x?

      I also ran across a complementary page, which might be of interest - it explains more of the vm variables, especially as they relate to writing: \"Theory of Operation and Tuning for Write-Heavy Loads\" `` and refs therein. hjmangalam

      ","text":""},{"location":"Administrator-Guide/Linux-Kernel-Tuning/#commentbengland_1","title":"comment:bengland

      Here are some additional suggestions based on recent testing:
      - Scaling out the number of clients -- you need to increase the size of the ARP tables on the Gluster server if you want to support more than 1K clients mounting a gluster volume. The defaults for RHEL6.3 were too low to support this; we used:

      net.ipv4.neigh.default.gc_thresh2 = 2048
      net.ipv4.neigh.default.gc_thresh3 = 4096

      In addition, tunings common to webservers become relevant at this number of clients as well, such as netdev_max_backlog, tcp_fin_timeout, and somaxconn.

      Bonding mode 6 has been observed to increase replication write performance. I have no experience with bonding mode 4, but it should work if the switch is properly configured; other bonding modes are a waste of time.

      bengland
      3 months ago

      ","text":""},{"location":"Administrator-Guide/Logging/","title":"GlusterFS service Logs and locations","text":"

      The following lists the component, service, and functionality based logs in the GlusterFS server. As per the File System Hierarchy Standard (FHS), all the log files are placed in the /var/log directory.

      "},{"location":"Administrator-Guide/Logging/#glusterd","title":"Glusterd:","text":"

      glusterd logs are located at /var/log/glusterfs/glusterd.log. There is one glusterd log file per server. This log file also contains the snapshot and user logs.

      "},{"location":"Administrator-Guide/Logging/#gluster-cli-command","title":"Gluster cli command:","text":"

      gluster CLI logs are located at /var/log/glusterfs/cli.log. Gluster commands executed on a node in a GlusterFS Trusted Storage Pool are logged in /var/log/glusterfs/cmd_history.log.

      "},{"location":"Administrator-Guide/Logging/#bricks","title":"Bricks:","text":"

      Brick logs are located at /var/log/glusterfs/bricks/<path extraction of brick path>.log. There is one log file per brick on the server.

      "},{"location":"Administrator-Guide/Logging/#rebalance","title":"Rebalance:","text":"

      Rebalance logs are located at /var/log/glusterfs/VOLNAME-rebalance.log. There is one log file per volume on the server.

      "},{"location":"Administrator-Guide/Logging/#self-heal-deamon","title":"Self heal deamon:","text":"

      Self-heal daemon logs are located at /var/log/glusterfs/glustershd.log. There is one log file per server.

      "},{"location":"Administrator-Guide/Logging/#quota","title":"Quota:","text":"

      /var/log/glusterfs/quotad.log contains the logs of the quota daemons running on each node. /var/log/glusterfs/quota-crawl.log: whenever quota is enabled, a file system crawl is performed and the corresponding log is stored in this file. /var/log/glusterfs/quota-mount-VOLNAME.log: an auxiliary FUSE client is mounted in /VOLNAME of the glusterFS and the corresponding client logs are found in this file. There is one log file per server and per volume from quota-mount."},{"location":"Administrator-Guide/Logging/#gluster-nfs","title":"Gluster NFS:","text":"

      /var/log/glusterfs/nfs.log One log file per server

      "},{"location":"Administrator-Guide/Logging/#samba-gluster","title":"SAMBA Gluster:","text":"

      /var/log/samba/glusterfs-VOLNAME-<ClientIP>.log . If the client mounts this on a glusterFS server node, the actual log file or the mount point may not be found. In such a case, the mount outputs of all the glusterFS type mount operations need to be considered.

      "},{"location":"Administrator-Guide/Logging/#ganesha-nfs","title":"Ganesha NFS :","text":"

      /var/log/nfs-ganesha.log

      "},{"location":"Administrator-Guide/Logging/#fuse-mount","title":"FUSE Mount:","text":"

      /var/log/glusterfs/<mountpoint path extraction>.log

      "},{"location":"Administrator-Guide/Logging/#geo-replication","title":"Geo-replication:","text":"

      /var/log/glusterfs/geo-replication/<primary> /var/log/glusterfs/geo-replication-secondary

      "},{"location":"Administrator-Guide/Logging/#gluster-volume-heal-volname-info-command","title":"Gluster volume heal VOLNAME info command:","text":"

      /var/log/glusterfs/glfsheal-VOLNAME.log . One log file per server on which the command is executed.

      "},{"location":"Administrator-Guide/Logging/#gluster-swift","title":"Gluster-swift:","text":"

      /var/log/messages

      "},{"location":"Administrator-Guide/Logging/#swiftkrbauth","title":"SwiftKrbAuth:","text":"

      /var/log/httpd/error_log

      "},{"location":"Administrator-Guide/Managing-Snapshots/","title":"Managing GlusterFS Volume Snapshots","text":"

      This section describes how to perform common GlusterFS volume snapshot management operations.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#pre-requisites","title":"Pre-requisites","text":"

      The GlusterFS volume snapshot feature is based on thinly provisioned LVM snapshots. To make use of the snapshot feature, a GlusterFS volume should fulfill the following pre-requisites:

      • Each brick should be on an independent thinly provisioned LVM (logical volume).
      • The brick LVM should not contain any data other than the brick itself.
      • None of the bricks should be on a thick-provisioned LVM.
      • The gluster version should be 3.6 or above.

      Details of how to create thin volume can be found at the following link. https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Logical_Volume_Manager_Administration/LV.html#thinly_provisioned_volume_creation

      "},{"location":"Administrator-Guide/Managing-Snapshots/#few-features-of-snapshot-are","title":"Few features of snapshot are:","text":"

      Crash Consistency

      When a snapshot is taken at a particular point in time, it is ensured that the snapshot is crash consistent. When the snapshot is restored, the data is identical to what it was at the time the snapshot was taken.

      Online Snapshot

      When the snapshot is being taken the file system and its associated data continue to be available for the clients.

      Barrier

      During snapshot creation some of the fops are blocked to guarantee crash consistency. There is a default time-out of 2 minutes; if snapshot creation is not complete within that span, the fops are unbarriered. If the unbarrier happens before the snapshot creation is complete, the snapshot creation operation fails. This is to ensure that the snapshot is in a consistent state.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#snapshot-management","title":"Snapshot Management","text":""},{"location":"Administrator-Guide/Managing-Snapshots/#snapshot-creation","title":"Snapshot creation","text":"
      gluster snapshot create <snapname> <volname> [no-timestamp] [description <description>]\n

      Creates a snapshot of a GlusterFS volume. User can provide a snap-name and a description to identify the snap. The description cannot be more than 1024 characters.

      The snapshot will be created by appending a timestamp to the user-provided snap name. The user can override this behaviour by passing the no-timestamp flag.

      NOTE: To be able to take a snapshot, the volume should be present and it should be in the started state. All the bricks used in creating the snapshot have to be online in order to successfully create a snapshot, as the force option is now deprecated.
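
      For example, using purely illustrative names (myvol must exist and be started, as noted above):

        gluster snapshot create snap1 myvol no-timestamp description before-upgrade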

      "},{"location":"Administrator-Guide/Managing-Snapshots/#snapshot-clone","title":"Snapshot clone","text":"
      gluster snapshot clone <clonename> <snapname>\n

      Creates a clone of a snapshot. Upon successful completion, a new GlusterFS volume will be created from snapshot. The clone will be a space efficient clone, i.e, the snapshot and the clone will share the backend disk.

      NOTE: To be able to take a clone from snapshot, snapshot should be present and it should be in activated state.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#restoring-snaps","title":"Restoring snaps","text":"
      gluster snapshot restore <snapname>\n

      Restores an already taken snapshot of a GlusterFS volume. Snapshot restore is an offline activity therefore if the volume is online (in started state) then the restore operation will fail.

      Once the snapshot is restored it will not be available in the list of snapshots.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#deleting-snaps","title":"Deleting snaps","text":"
      gluster snapshot delete (all | <snapname> | volume <volname>)\n

      If snapname is specified, then the mentioned snapshot is deleted. If volname is specified, then all snapshots belonging to that particular volume are deleted. If the keyword all is used, then all snapshots in the system are deleted.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#listing-of-available-snaps","title":"Listing of available snaps","text":"
      gluster snapshot list [volname]\n

      Lists all snapshots taken. If volname is provided, then only the snapshots belonging to that particular volume are listed.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#information-of-available-snaps","title":"Information of available snaps","text":"
      gluster snapshot info [(snapname | volume <volname>)]\n

      This command gives information such as the snapshot name, the snapshot UUID, the time at which the snapshot was created, the snap-volume-name, the number of snapshots already taken and the number of snapshots still available for that particular volume, and the state of the snapshot.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#status-of-snapshots","title":"Status of snapshots","text":"
      gluster snapshot status [(snapname | volume <volname>)]\n

      This command gives the status of the snapshot. The details included are the snapshot brick path, the volume group (LVM details), the status of the snapshot bricks, the PID of the bricks, the data percentage filled for the volume group to which the snapshots belong, and the total size of the logical volume.

      If snapname is specified, then the status of the mentioned snapshot is displayed. If volname is specified, then the status of all snapshots belonging to that volume is displayed. If neither snapname nor volname is specified, then the status of all the snapshots present in the system is displayed.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#configuring-the-snapshot-behavior","title":"Configuring the snapshot behavior","text":"
      snapshot config [volname] ([snap-max-hard-limit <count>] [snap-max-soft-limit <percent>])\n                            | ([auto-delete <enable|disable>])\n                            | ([activate-on-create <enable|disable>])\n

      Displays and sets the snapshot config values.

      snapshot config without any keywords displays the snapshot config values of all volumes in the system. If volname is provided, then the snapshot config values of that volume is displayed.

      The snapshot config command along with keywords can be used to change the existing config values. If volname is provided, then the config value of that volume is changed; otherwise it will set/change the system limit.

      snap-max-soft-limit and auto-delete are global options that are inherited by all volumes in the system and cannot be set for individual volumes.

      The system limit takes precedence over the volume specific limit.

      When auto-delete feature is enabled, then upon reaching the soft-limit, with every successful snapshot creation, the oldest snapshot will be deleted.

      When auto-delete feature is disabled, then upon reaching the soft-limit, the user gets a warning with every successful snapshot creation.

      Upon reaching the hard-limit, further snapshot creations will not be allowed.

      activate-on-create is disabled by default. If you enable activate-on-create, then any further snapshots will be activated at the time of creation.
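
      A few illustrative invocations following the syntax above (myvol is an example volume; the soft limit, auto-delete and activate-on-create settings are system-wide, so no volume name is given for them):

        gluster snapshot config myvol snap-max-hard-limit 100
        gluster snapshot config snap-max-soft-limit 80
        gluster snapshot config auto-delete enable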

      "},{"location":"Administrator-Guide/Managing-Snapshots/#activating-a-snapshot","title":"Activating a snapshot","text":"
      gluster snapshot activate <snapname>\n

      Activates the mentioned snapshot.

      Note: By default the snapshot will not be activated during snapshot creation.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#deactivate-a-snapshot","title":"Deactivate a snapshot","text":"
      gluster snapshot deactivate <snapname>\n

      Deactivates the mentioned snapshot.

      "},{"location":"Administrator-Guide/Managing-Snapshots/#accessing-the-snapshot","title":"Accessing the snapshot","text":"

      Snapshots can be accessed in 2 ways.

      1. Mounting the snapshot:

        The snapshot can be accessed via FUSE mount (only fuse). To do that it has to be mounted first. A snapshot can be mounted via fuse by below command

        mount -t glusterfs <hostname>:/snaps/<snap-name>/<volume-name> <mount-path>\n

        i.e. say \"host1\" is one of the peers. Let \"vol\" be the volume name and \"my-snap\" be the snapshot name. In this case a snapshot can be mounted via this command

        mount -t glusterfs host1:/snaps/my-snap/vol /mnt/snapshot\n
      2. User serviceability:

        Apart from the above method of mounting the snapshot, a list of available snapshots and the contents of each snapshot can be viewed from any of the mount points accessing the glusterfs volume (either FUSE or NFS or SMB). For having user serviceable snapshots, it has to be enabled for a volume first. User serviceability can be enabled for a volume using the below command.

        gluster volume set <volname> features.uss enable\n

        Once enabled, an access point to the snapshot world is created in every directory (including the root of the filesystem). The access point is a hidden directory; cd-ing into it takes the user into the snapshot world. By default the hidden directory is \".snaps\". Once user serviceability is enabled, one will be able to cd into .snaps from any directory. Doing \"ls\" in that directory shows a list of directories, which are nothing but the snapshots present for that volume. Say there are 3 snapshots (\"snap1\", \"snap2\", \"snap3\"); then doing ls in the .snaps directory will show those 3 names as directory entries. They represent the state of the directory from which .snaps was entered, at different points in time.

        NOTE: The access to the snapshots are read-only. The snapshot needs to be activated for it to be accessible inside .snaps directory.

        Also, the name of the hidden directory (or the access point to the snapshot world) can be changed using the below command.

        gluster volume set <volname> snapshot-directory <new-name>\n
      3. Accessing from windows:

        The glusterfs volumes can be made accessible by windows via samba. (the glusterfs plugin for samba helps achieve this, without having to re-export a fuse mounted glusterfs volume). The snapshots of a glusterfs volume can also be viewed in the windows explorer.

        There are 2 ways:

        • Give the path of the entry point directory (\\<hostname>\<samba-share>\<directory>\<entry-point path>) in the run command window

        • Go to the samba share via windows explorer. Make hidden files and folders visible so that in the root of the samba share a folder icon for the entry point can be seen.

      NOTE: From the explorer, snapshot world can be entered via entry point only from the root of the samba share. If snapshots have to be seen from subfolders, then the path should be provided in the run command window.

      For snapshots to be accessible from windows, below 2 options can be used.

      1. The glusterfs plugin for samba should give the option \"snapdir-entry-path\" while starting. The option is an indication to glusterfs, that samba is loading it and the value of the option should be the path that is being used as the share for windows.

        Ex: Say, there is a glusterfs volume and a directory called \"export\" from the root of the volume is being used as the samba share, then samba has to load glusterfs with this option as well.

         ret = glfs_set_xlator_option(\n         fs,\n         \"*-snapview-client\",\n         \"snapdir-entry-path\", \"/export\"\n );\n

        The xlator option \"snapdir-entry-path\" is not exposed via volume set options and cannot be changed from the CLI. It is an option that has to be provided at the time of mounting glusterfs or when samba loads glusterfs.

      2. The accessibility of snapshots via root of the samba share from windows is configurable. By default it is turned off. It is a volume set option which can be changed via CLI.

        gluster volume set <volname> features.show-snapshot-directory <on/off>. By default it is off.

      Only when both the above options have been provided (i.e. snapdir-entry-path contains a valid unix path that is exported and the show-snapshot-directory option is set to true) can snapshots be accessed via the windows explorer.

      If only 1st option (i.e. snapdir-entry-path) is set via samba and 2nd option (i.e. show-snapshot-directory) is off, then snapshots can be accessed from windows via the run command window, but not via the explorer.

      "},{"location":"Administrator-Guide/Managing-Volumes/","title":"Managing GlusterFS Volumes","text":"

      This section describes how to perform common GlusterFS management operations, including the following:

      • Tuning Volume Options
      • Configuring Transport Types for a Volume
      • Expanding Volumes
      • Shrinking Volumes
      • Replacing Bricks
      • Rebalancing Volumes
      • Stopping Volumes
      • Deleting Volumes
      • Triggering Self-Heal on Replicate
      • Non Uniform File Allocation(NUFA)

      "},{"location":"Administrator-Guide/Managing-Volumes/#configuring-transport-types-for-a-volume","title":"Configuring Transport Types for a Volume","text":"

      A volume can support one or more transport types for communication between clients and brick processes. There are three types of supported transport, which are tcp, rdma, and tcp,rdma.

      To change the supported transport types of a volume, follow the procedure:

      1. Unmount the volume on all the clients using the following command:

        umount mount-point\n
      2. Stop the volumes using the following command:

        gluster volume stop <VOLNAME>\n
      3. Change the transport type. For example, to enable both tcp and rdma execute the following command:

        gluster volume set test-volume config.transport tcp,rdma OR tcp OR rdma\n
      4. Mount the volume on all the clients. For example, to mount using rdma transport, use the following command:

        mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs\n

      "},{"location":"Administrator-Guide/Managing-Volumes/#expanding-volumes","title":"Expanding Volumes","text":"

      You can expand volumes, as needed, while the cluster is online and available. For example, you might want to add a brick to a distributed volume, thereby increasing the distribution and adding to the capacity of the GlusterFS volume.

      Similarly, you might want to add a group of bricks to a distributed replicated volume, increasing the capacity of the GlusterFS volume.

      Note When expanding distributed replicated and distributed dispersed volumes, you need to add a number of bricks that is a multiple of the replica or disperse count. For example, to expand a distributed replicated volume with a replica count of 2, you need to add bricks in multiples of 2 (such as 4, 6, 8, etc.).

      To expand a volume

      1. If they are not already part of the TSP, probe the servers which contain the bricks you want to add to the volume using the following command:

        gluster peer probe <SERVERNAME>\n

        For example:

        # gluster peer probe server4\nProbe successful\n
      2. Add the brick using the following command:

        gluster volume add-brick <VOLNAME> <NEW-BRICK>\n

        For example:

        # gluster volume add-brick test-volume server4:/exp4\nAdd Brick successful\n
      3. Check the volume information using the following command:

        gluster volume info <VOLNAME>\n

        The command displays information similar to the following:

        Volume Name: test-volume\nType: Distribute\nStatus: Started\nNumber of Bricks: 4\nBricks:\nBrick1: server1:/exp1\nBrick2: server2:/exp2\nBrick3: server3:/exp3\nBrick4: server4:/exp4\n
      4. Rebalance the volume to ensure that files are distributed to the new brick.

        You can use the rebalance command as described in Rebalancing Volumes

      "},{"location":"Administrator-Guide/Managing-Volumes/#shrinking-volumes","title":"Shrinking Volumes","text":"

      You can shrink volumes, as needed, while the cluster is online and available. For example, you might need to remove a brick that has become inaccessible in a distributed volume due to hardware or network failure.

      Note Data residing on the brick that you are removing will no longer be accessible at the Gluster mount point. Note however that only the configuration information is removed - you can continue to access the data directly from the brick, as necessary.

      When shrinking distributed replicated and distributed dispersed volumes, you need to remove a number of bricks that is a multiple of the replica or disperse count. For example, to shrink a distributed replicated volume with a replica count of 2, you need to remove bricks in multiples of 2 (such as 4, 6, 8, etc.). In addition, the bricks you are trying to remove must be from the same sub-volume (the same replica or disperse set).

      Running remove-brick with the start option will automatically trigger a rebalance operation to migrate data from the removed-bricks to the rest of the volume.

      To shrink a volume

      1. Remove the brick using the following command:

        gluster volume remove-brick <VOLNAME> <BRICKNAME> start\n

        For example, to remove server2:/exp2:

        # gluster volume remove-brick test-volume server2:/exp2 start\nvolume remove-brick start: success\n
      2. View the status of the remove brick operation using the following command:

        gluster volume remove-brick <VOLNAME> <BRICKNAME> status\n

        For example, to view the status of remove brick operation on server2:/exp2 brick:

        # gluster volume remove-brick test-volume server2:/exp2 status\n                                Node  Rebalanced-files  size  scanned       status\n                           ---------  ----------------  ----  -------  -----------\n617c923e-6450-4065-8e33-865e28d9428f               34   340      162   in progress\n
      3. Once the status displays \"completed\", commit the remove-brick operation

        gluster volume remove-brick <VOLNAME> <BRICKNAME> commit\n

        In this example:

        # gluster volume remove-brick test-volume server2:/exp2 commit\nRemoving brick(s) can result in data loss. Do you want to Continue? (y/n) y\nvolume remove-brick commit: success\nCheck the removed bricks to ensure all files are migrated.\nIf files with data are found on the brick path, copy them via a gluster mount point before re-purposing the removed brick.\n
      4. Check the volume information using the following command:

        gluster volume info\n

        The command displays information similar to the following:

        # gluster volume info\nVolume Name: test-volume\nType: Distribute\nStatus: Started\nNumber of Bricks: 3\nBricks:\nBrick1: server1:/exp1\nBrick3: server3:/exp3\nBrick4: server4:/exp4\n

      "},{"location":"Administrator-Guide/Managing-Volumes/#replace-faulty-brick","title":"Replace faulty brick","text":"

      Replacing a brick in a pure distribute volume

      To replace a brick on a distribute only volume, add the new brick and then remove the brick you want to replace. This will trigger a rebalance operation which will move data from the removed brick.

      NOTE: Replacing a brick using the 'replace-brick' command in gluster is supported only for pure replicate or distributed-replicate volumes.

      Steps to remove brick Server1:/home/gfs/r2_1 and add Server1:/home/gfs/r2_2:

      1. Here is the initial volume configuration:

        Volume Name: r2\nType: Distribute\nVolume ID: 25b4e313-7b36-445d-b524-c3daebb91188\nStatus: Started\nNumber of Bricks: 2\nTransport-type: tcp\nBricks:\nBrick1: Server1:/home/gfs/r2_0\nBrick2: Server1:/home/gfs/r2_1\n
      2. Here are the files that are present on the mount:

        # ls\n1  10  2  3  4  5  6  7  8  9\n
      3. Add the new brick - Server1:/home/gfs/r2_2 now:

        # gluster volume add-brick r2 Server1:/home/gfs/r2_2\nvolume add-brick: success\n
      4. Start remove-brick using the following command:

        # gluster volume remove-brick r2 Server1:/home/gfs/r2_1 start\nvolume remove-brick start: success\nID: fba0a488-21a4-42b7-8a41-b27ebaa8e5f4\n
      5. Wait until remove-brick status indicates that it is complete.

        # gluster volume remove-brick r2 Server1:/home/gfs/r2_1 status\n                                Node Rebalanced-files          size       scanned      failures       skipped               status   run time in secs\n                           ---------      -----------   -----------   -----------   -----------   -----------         ------------     --------------\n                           localhost                5       20Bytes            15             0             0            completed               0.00\n
      6. Now we can safely remove the old brick, so commit the changes:

        # gluster volume remove-brick r2 Server1:/home/gfs/r2_1 commit\nRemoving brick(s) can result in data loss. Do you want to Continue? (y/n) y\nvolume remove-brick commit: success\n
      7. Here is the new volume configuration.

        Volume Name: r2\nType: Distribute\nVolume ID: 25b4e313-7b36-445d-b524-c3daebb91188\nStatus: Started\nNumber of Bricks: 2\nTransport-type: tcp\nBricks:\nBrick1: Server1:/home/gfs/r2_0\nBrick2: Server1:/home/gfs/r2_2\n
      8. Check the contents of the mount:

        # ls\n1  10  2  3  4  5  6  7  8  9\n

      Replacing bricks in Replicate/Distributed Replicate volumes

      This section of the document describes how brick: Server1:/home/gfs/r2_0 is replaced with brick: Server1:/home/gfs/r2_5 in volume r2 with replica count 2.

          Volume Name: r2\n    Type: Distributed-Replicate\n    Volume ID: 24a0437a-daa0-4044-8acf-7aa82efd76fd\n    Status: Started\n    Number of Bricks: 2 x 2 = 4\n    Transport-type: tcp\n    Bricks:\n    Brick1: Server1:/home/gfs/r2_0\n    Brick2: Server2:/home/gfs/r2_1\n    Brick3: Server1:/home/gfs/r2_2\n    Brick4: Server2:/home/gfs/r2_3\n

      Steps:

      1. Make sure there is no data in the new brick Server1:/home/gfs/r2_5
      2. Check that all the bricks are running. It is okay if the brick that is going to be replaced is down.
      3. Replace the brick with 'commit force' option. Please note that other variants of replace-brick command are not supported.

        • Execute replace-brick command

          # gluster volume replace-brick r2 Server1:/home/gfs/r2_0 Server1:/home/gfs/r2_5 commit force
          volume replace-brick: success: replace-brick commit successful

        • Check that the new brick is now online

          # gluster volume status
          Status of volume: r2
          Gluster process                      Port    Online  Pid
          Brick Server1:/home/gfs/r2_5         49156   Y       5731   <---- new brick is online
          Brick Server2:/home/gfs/r2_1         49153   Y       5354
          Brick Server1:/home/gfs/r2_2         49154   Y       5365
          Brick Server2:/home/gfs/r2_3         49155   Y       5376

        • Users can track the progress of self-heal using: gluster volume heal [volname] info, or by checking the size of the new brick.

        • # gluster volume heal <volname> info will show that no heal is required when the data is fully synced to the replaced brick.

          # gluster volume heal r2 info
          Brick Server1:/home/gfs/r2_5
          Number of entries: 0

          Brick Server2:/home/gfs/r2_1
          Number of entries: 0

          Brick Server1:/home/gfs/r2_2
          Number of entries: 0

          Brick Server2:/home/gfs/r2_3
          Number of entries: 0

      "},{"location":"Administrator-Guide/Managing-Volumes/#rebalancing-volumes","title":"Rebalancing Volumes","text":"

      After expanding a volume using the add-brick command, you may need to rebalance the data among the servers. New directories created after expanding or shrinking of the volume will be evenly distributed automatically. For all the existing directories, the distribution can be fixed by rebalancing the layout and/or data.

      This section describes how to rebalance GlusterFS volumes in your storage environment, using the following common scenarios:

      • Fix Layout - Fixes the layout to use the new volume topology so that files can be distributed to newly added nodes.

      • Fix Layout and Migrate Data - Rebalances volume by fixing the layout to use the new volume topology and migrating the existing data.

      "},{"location":"Administrator-Guide/Managing-Volumes/#rebalancing-volume-to-fix-layout-changes","title":"Rebalancing Volume to Fix Layout Changes","text":"

      Fixing the layout is necessary because the layout structure is static for a given directory. Even after new bricks are added to the volume, newly created files in existing directories will still be distributed only among the original bricks. The command gluster volume rebalance <volname> fix-layout start will fix the layout information so that the files can be created on the newly added bricks. When this command is issued, all the file stat information which is already cached will get revalidated.

      As of GlusterFS 3.6, the assignment of files to bricks will take into account the sizes of the bricks. For example, a 20TB brick will be assigned twice as many files as a 10TB brick. In versions before 3.6, the two bricks were treated as equal regardless of size, and would have been assigned an equal share of files.

      A fix-layout rebalance will only fix the layout changes and does not migrate data. If you want to migrate the existing data, use gluster volume rebalance <volume> start command to rebalance data among the servers.

      To rebalance a volume to fix layout

      • Start the rebalance operation on any Gluster server using the following command:

      # gluster volume rebalance <VOLNAME> fix-layout start

      For example:

        # gluster volume rebalance test-volume fix-layout start\n  Starting rebalance on volume test-volume has been successful\n
      "},{"location":"Administrator-Guide/Managing-Volumes/#rebalancing-volume-to-fix-layout-and-migrate-data","title":"Rebalancing Volume to Fix Layout and Migrate Data","text":"

      After expanding a volume using the add-brick command, you need to rebalance the data among the servers. A remove-brick command will automatically trigger a rebalance.

      To rebalance a volume to fix layout and migrate the existing data

      • Start the rebalance operation on any one of the server using the following command:

      # gluster volume rebalance <VOLNAME> start

      For example:

        # gluster volume rebalance test-volume start\n  Starting rebalancing on volume test-volume has been successful\n
      • Start the migration operation forcefully on any one of the servers using the following command:

      # gluster volume rebalance <VOLNAME> start force

      For example:

        # gluster volume rebalance test-volume start force\n  Starting rebalancing on volume test-volume has been successful\n

      A rebalance operation will attempt to balance the disk usage across nodes, and therefore it will skip files where the move would result in a less balanced volume. This leads to link files that are left behind in the system and may cause performance issues. This behaviour can be overridden with the force argument.

      "},{"location":"Administrator-Guide/Managing-Volumes/#displaying-the-status-of-rebalance-operation","title":"Displaying the Status of Rebalance Operation","text":"

      You can display the status information about rebalance volume operation, as needed.

      • Check the status of the rebalance operation, using the following command:

      # gluster volume rebalance <VOLNAME> status

      For example:

        # gluster volume rebalance test-volume status\n                                  Node  Rebalanced-files  size  scanned       status\n                             ---------  ----------------  ----  -------  -----------\n  617c923e-6450-4065-8e33-865e28d9428f               416  1463      312  in progress\n

      The time to complete the rebalance operation depends on the number of files on the volume along with the corresponding file sizes. Continue checking the rebalance status, verifying that the number of files rebalanced or total files scanned keeps increasing.

      For example, running the status command again might display a result similar to the following:

        # gluster volume rebalance test-volume status\n                                  Node  Rebalanced-files  size  scanned       status\n                             ---------  ----------------  ----  -------  -----------\n  617c923e-6450-4065-8e33-865e28d9428f               498  1783      378  in progress\n

      The rebalance status displays the following when the rebalance is complete:

        # gluster volume rebalance test-volume status\n                                  Node  Rebalanced-files  size  scanned       status\n                             ---------  ----------------  ----  -------  -----------\n  617c923e-6450-4065-8e33-865e28d9428f               502  1873      334   completed\n
      "},{"location":"Administrator-Guide/Managing-Volumes/#stopping-an-ongoing-rebalance-operation","title":"Stopping an Ongoing Rebalance Operation","text":"

      You can stop the rebalance operation, if needed.

      • Stop the rebalance operation using the following command:

      # gluster volume rebalance <VOLNAME> stop

      For example:

        # gluster volume rebalance test-volume stop\n                                  Node  Rebalanced-files  size  scanned       status\n                             ---------  ----------------  ----  -------  -----------\n  617c923e-6450-4065-8e33-865e28d9428f               59   590      244       stopped\n  Stopped rebalance process on volume test-volume\n

      "},{"location":"Administrator-Guide/Managing-Volumes/#stopping-volumes","title":"Stopping Volumes","text":"
      1. Stop the volume using the following command:

        # gluster volume stop <VOLNAME>

        For example, to stop test-volume:

        # gluster volume stop test-volume\nStopping volume will make its data inaccessible. Do you want to continue? (y/n)\n
      2. Enter y to confirm the operation. The output of the command displays the following:

        Stopping volume test-volume has been successful\n

      "},{"location":"Administrator-Guide/Managing-Volumes/#deleting-volumes","title":"Deleting Volumes","text":"
      1. Delete the volume using the following command:

        # gluster volume delete <VOLNAME>

        For example, to delete test-volume:

        # gluster volume delete test-volume\nDeleting volume will erase all information about the volume. Do you want to continue? (y/n)\n
      2. Enter y to confirm the operation. The command displays the following:

        Deleting volume test-volume has been successful\n

      "},{"location":"Administrator-Guide/Managing-Volumes/#triggering-self-heal-on-replicate","title":"Triggering Self-Heal on Replicate","text":"

      In the replicate module, you previously had to manually trigger a self-heal when a brick went offline and came back online, to bring all the replicas back in sync. Now the proactive self-heal daemon runs in the background, diagnoses issues and automatically initiates self-healing every 10 minutes on the files which require healing.

      You can view the list of files that need healing, the list of files which are currently/previously healed, the list of files which are in split-brain state, and you can manually trigger self-heal on the entire volume or only on the files which need healing.

      • Trigger self-heal only on the files which require healing:

      # gluster volume heal <VOLNAME>

      For example, to trigger self-heal on the files which require healing on test-volume:

        # gluster volume heal test-volume\n  Heal operation on volume test-volume has been successful\n
      • Trigger self-heal on all the files of a volume:

      # gluster volume heal <VOLNAME> full

      For example, to trigger self-heal on all the files of test-volume:

        # gluster volume heal test-volume full\n  Heal operation on volume test-volume has been successful\n
      • View the list of files that need healing:

      # gluster volume heal <VOLNAME> info

      For example, to view the list of files on test-volume that need healing:

        # gluster volume heal test-volume info\n  Brick server1:/gfs/test-volume_0\n  Number of entries: 0\n\n  Brick server2:/gfs/test-volume_1\n  Number of entries: 101\n  /95.txt\n  /32.txt\n  /66.txt\n  /35.txt\n  /18.txt\n  /26.txt\n  /47.txt\n  /55.txt\n  /85.txt\n  ...\n
      • View the list of files that are self-healed:

      # gluster volume heal <VOLNAME> info healed

      For example, to view the list of files on test-volume that are self-healed:

        # gluster volume heal test-volume info healed\n  Brick Server1:/gfs/test-volume_0\n  Number of entries: 0\n\n  Brick Server2:/gfs/test-volume_1\n  Number of entries: 69\n  /99.txt\n  /93.txt\n  /76.txt\n  /11.txt\n  /27.txt\n  /64.txt\n  /80.txt\n  /19.txt\n  /41.txt\n  /29.txt\n  /37.txt\n  /46.txt\n  ...\n
      • View the list of files of a particular volume on which the self-heal failed:

      # gluster volume heal <VOLNAME> info failed

      For example, to view the list of files of test-volume that are not self-healed:

        # gluster volume heal test-volume info failed\n  Brick Server1:/gfs/test-volume_0\n  Number of entries: 0\n\n  Brick Server2:/gfs/test-volume_3\n  Number of entries: 72\n  /90.txt\n  /95.txt\n  /77.txt\n  /71.txt\n  /87.txt\n  /24.txt\n  ...\n
      • View the list of files of a particular volume which are in split-brain state:

      # gluster volume heal <VOLNAME> info split-brain

      For example, to view the list of files of test-volume which are in split-brain state:

        # gluster volume heal test-volume info split-brain\n  Brick Server1:/gfs/test-volume_2\n  Number of entries: 12\n  /83.txt\n  /28.txt\n  /69.txt\n  ...\n\n  Brick Server2:/gfs/test-volume_3\n  Number of entries: 12\n  /83.txt\n  /28.txt\n  /69.txt\n  ...\n

      "},{"location":"Administrator-Guide/Managing-Volumes/#non-uniform-file-allocation","title":"Non Uniform File Allocation","text":"

      The NUFA (Non Uniform File Access) translator is designed to give higher preference to a local drive when used in an HPC type of environment. It can be applied to the Distribute and Replica translators; in the latter case it ensures that one copy is local if space permits.

      When a client on a server creates files, the files are allocated to a brick in the volume based on the file name. This allocation may not be ideal, as there is higher latency and unnecessary network traffic for read/write operations to a non-local brick or export directory. NUFA ensures that the files are created in the local export directory of the server, and as a result, reduces latency and conserves bandwidth for that server accessing that file. This can also be useful for applications running on mount points on the storage server.

      If the local brick runs out of space or reaches the minimum disk free limit, instead of allocating files to the local brick, NUFA distributes files to other bricks in the same volume if there is space available on those bricks.

      NUFA should be enabled before creating any data in the volume.

      Use the following steps to enable NUFA:

      • decide which group is going to be used for managing the settings of your volume (referred to below as <GROUPNAME>; a combined example follows these steps)
      • define cluster.nufa enable for this group:
      • # echo \"cluster.nufa=enable\" | tee -a /var/lib/glusterd/groups/<GROUPNAME>

        • add your volume to the group

        # gluster volume set <VOLNAME> group <GROUPNAME>

        • verify whether the nufa setting was set properly

        # gluster volume info
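
        For example, putting these steps together, a minimal sketch (using a hypothetical group file named nufa and a volume named test-volume, both placeholder names):

          # echo \"cluster.nufa=enable\" | tee -a /var/lib/glusterd/groups/nufa\n  # gluster volume set test-volume group nufa\n  # gluster volume info test-volume\n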

        Important

        NUFA is supported under the following conditions:

        • Volumes with only one brick per server.
        • For use with a FUSE client. NUFA is not supported with NFS or SMB.
        • A client that is mounting a NUFA-enabled volume must be present within the trusted storage pool.

        The NUFA scheduler also exists, for use with the Unify translator; see below.

        volume bricks\n  type cluster/nufa\n  option local-volume-name brick1\n  subvolumes brick1 brick2 brick3 brick4 brick5 brick6 brick7\nend-volume\n
        "},{"location":"Administrator-Guide/Managing-Volumes/#nufa-additional-options","title":"NUFA additional options","text":"
        • lookup-unhashed

        This is an advanced option where files are looked up in all subvolumes if they are missing on the subvolume matching the hash value of the filename. The default is on.

        • local-volume-name

        The volume name to consider local and prefer file creations on. The default is to search for a volume matching the hostname of the system.

        • subvolumes

        This option lists the subvolumes that are part of this 'cluster/nufa' volume. This translator requires more than one subvolume.

        "},{"location":"Administrator-Guide/Managing-Volumes/#bitrot-detection","title":"BitRot Detection","text":"

        With BitRot detection in Gluster, it's possible to identify \"insidious\" types of disk errors where data is silently corrupted with no indication from the disk to the storage software layer that an error has occurred. This also helps in catching \"backend\" tinkering of bricks (where data is directly manipulated on the bricks without going through FUSE, NFS or any other access protocol).

        BitRot detection is disabled by default and needs to be enabled to make use of the other sub-commands.

        1. To enable bitrot detection for a given volume :

          # gluster volume bitrot <VOLNAME> enable

          and similarly to disable bitrot use:

          # gluster volume bitrot <VOLNAME> disable

          Note: Enabling bitrot spawns the Signer & Scrubber daemons per node. The Signer is responsible for signing an object (calculating a checksum for each file) and the Scrubber verifies the calculated checksum against the object's data.

          2. The Scrubber daemon has three (3) throttling modes that adjust the rate at which objects are verified.

             # gluster volume bitrot <VOLNAME> scrub-throttle lazy\n # gluster volume bitrot <VOLNAME> scrub-throttle normal\n # gluster volume bitrot <VOLNAME> scrub-throttle aggressive\n
          3. By default the scrubber scrubs the filesystem biweekly. It's possible to tune it to scrub based on a predefined frequency such as monthly, etc. This can be done as shown below:

             # gluster volume bitrot <VOLNAME> scrub-frequency daily\n # gluster volume bitrot <VOLNAME> scrub-frequency weekly\n # gluster volume bitrot <VOLNAME> scrub-frequency biweekly\n # gluster volume bitrot <VOLNAME> scrub-frequency monthly\n

          NOTE: Daily scrubbing would not be available with GA release.

          4. The Scrubber daemon can be paused and later resumed when required. This can be done as shown below:

          # gluster volume bitrot <VOLNAME> scrub pause

          and to resume scrubbing:

          # gluster volume bitrot <VOLNAME> scrub resume

          Note: Signing cannot be paused (and resumed) and will always be active as long as bitrot is enabled for that particular volume.
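
          Taken together, a minimal sketch of enabling bitrot detection and tuning the scrubber on a volume (test-volume is used purely as a placeholder name):

             # gluster volume bitrot test-volume enable\n # gluster volume bitrot test-volume scrub-throttle lazy\n # gluster volume bitrot test-volume scrub-frequency monthly\n # gluster volume bitrot test-volume scrub pause\n # gluster volume bitrot test-volume scrub resume\n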

          "},{"location":"Administrator-Guide/Mandatory-Locks/","title":"Mandatory Locks","text":"

           Support for mandatory locks inside GlusterFS does not, by itself, match what the Linux kernel provides to user-space file systems. Here we enforce core mandatory lock semantics with and without the help of file mode bits. Please read through the design specification, which explains the whole concept behind the mandatory locks implementation done for GlusterFS.

          "},{"location":"Administrator-Guide/Mandatory-Locks/#implications-and-usage","title":"Implications and Usage","text":"

           By default, mandatory locking is disabled for a volume, and a volume set option is available to configure the volume to operate under three different mandatory locking modes.

          "},{"location":"Administrator-Guide/Mandatory-Locks/#volume-option","title":"Volume Option","text":"
          gluster volume set <VOLNAME> locks.mandatory-locking <off / file / forced / optimal>\n

           • off - Disable mandatory locking for the specified volume.
           • file - Enable Linux kernel style mandatory locking semantics with the help of mode bits (not well tested).
           • forced - Check for conflicting byte range locks for every data modifying operation in a volume.
           • optimal - Combinational mode where POSIX clients can live with their advisory lock semantics, which will still honour the mandatory locks acquired by other clients like SMB.

           Note: Please refer to the design doc for more information on these key values.

          "},{"location":"Administrator-Guide/Mandatory-Locks/#points-to-be-remembered","title":"Points to be remembered","text":"
           • The key values set with the mandatory-locking volume set option take effect only after a subsequent start/restart of the volume (see the example after this list).
           • Due to some outstanding issues, it is recommended to turn off the performance translators in order to have the complete functionality of mandatory-locks when the volume is configured in any one of the above described mandatory-locking modes. Please see the 'Known issues' section below for more details.
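
           For example, a minimal sketch of switching a volume to the optimal mode and restarting it so that the option takes effect (test-volume is a placeholder name):

           # gluster volume set test-volume locks.mandatory-locking optimal\n # gluster volume stop test-volume\n # gluster volume start test-volume\n
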
          "},{"location":"Administrator-Guide/Mandatory-Locks/#known-issues","title":"Known issues","text":"
           • Since the whole logic of mandatory-locks is implemented within the locks translator loaded on the server side, early success returned for fops like open, read, and write to the upper/application layer by performance translators residing on the client side will impact the intended functionality of mandatory-locks. One such issue is being tracked in the following bugzilla report:

          https://bugzilla.redhat.com/show_bug.cgi?id=1194546

           • There is a possible race window uncovered with respect to mandatory locks and an ongoing read/write operation. For more details refer to the bug report given below:

          https://bugzilla.redhat.com/show_bug.cgi?id=1287099

          "},{"location":"Administrator-Guide/Monitoring-Workload/","title":"Monitoring your GlusterFS Workload","text":"

           You can monitor GlusterFS volumes on different parameters. Monitoring volumes helps in the capacity planning and performance tuning tasks of the GlusterFS volume. Using this information, you can identify and troubleshoot issues.

           You can use the Volume Top and Profile commands to view performance and identify bottlenecks/hotspots of each brick of a volume. This helps system administrators get vital performance information whenever performance needs to be probed.

           You can also perform a statedump of the brick processes and the NFS server process of a volume, and view volume status and volume information.

          "},{"location":"Administrator-Guide/Monitoring-Workload/#running-glusterfs-volume-profile-command","title":"Running GlusterFS Volume Profile Command","text":"

           The GlusterFS Volume Profile command provides an interface to get the per-brick I/O information for each File Operation (FOP) of a volume. The per-brick information helps in identifying bottlenecks in the storage system.

          This section describes how to run GlusterFS Volume Profile command by performing the following operations:

          • Start Profiling
           • Displaying the I/O Information
          • Stop Profiling

          "},{"location":"Administrator-Guide/Monitoring-Workload/#start-profiling","title":"Start Profiling","text":"

           You must start profiling to view the File Operation information for each brick.

           To start profiling, use the following command:

           # gluster volume profile <VOLNAME> start

          For example, to start profiling on test-volume:

          # gluster volume profile test-volume start\nProfiling started on test-volume\n

          When profiling on the volume is started, the following additional options are displayed in the Volume Info:

          diagnostics.count-fop-hits: on\ndiagnostics.latency-measurement: on\n

          "},{"location":"Administrator-Guide/Monitoring-Workload/#displaying-the-i0-information","title":"Displaying the I/0 Information","text":"

          You can view the I/O information of each brick by using the following command:

           # gluster volume profile <VOLNAME> info

          For example, to see the I/O information on test-volume:

          # gluster volume profile test-volume info\nBrick: Test:/export/2\nCumulative Stats:\n\nBlock                     1b+           32b+           64b+\nSize:\n       Read:                0              0              0\n       Write:             908             28              8\n\nBlock                   128b+           256b+         512b+\nSize:\n       Read:                0               6             4\n       Write:               5              23            16\n\nBlock                  1024b+          2048b+        4096b+\nSize:\n       Read:                 0              52           17\n       Write:               15             120          846\n\nBlock                   8192b+         16384b+      32768b+\nSize:\n       Read:                52               8           34\n       Write:              234             134          286\n\nBlock                                  65536b+     131072b+\nSize:\n       Read:                               118          622\n       Write:                             1341          594\n\n\n%-latency  Avg-      Min-       Max-       calls     Fop\n          latency   Latency    Latency\n___________________________________________________________\n4.82      1132.28   21.00      800970.00   4575    WRITE\n5.70       156.47    9.00      665085.00   39163   READDIRP\n11.35      315.02    9.00     1433947.00   38698   LOOKUP\n11.88     1729.34   21.00     2569638.00    7382   FXATTROP\n47.35   104235.02 2485.00     7789367.00     488   FSYNC\n\n------------------\n\n------------------\n\nDuration     : 335\n\nBytesRead    : 94505058\n\nBytesWritten : 195571980\n

          "},{"location":"Administrator-Guide/Monitoring-Workload/#stop-profiling","title":"Stop Profiling","text":"

          You can stop profiling the volume, if you do not need profiling information anymore.

          Stop profiling using the following command:

           # gluster volume profile <VOLNAME> stop\n

          For example, to stop profiling on test-volume:

           # gluster volume profile test-volume stop\n\nProfiling stopped on test-volume\n
          "},{"location":"Administrator-Guide/Monitoring-Workload/#running-glusterfs-volume-top-command","title":"Running GlusterFS Volume TOP Command","text":"

           The GlusterFS Volume Top command allows you to view the glusterfs bricks' performance metrics like read, write, file open calls, file read calls, file write calls, directory open calls, and directory read calls. The top command displays up to 100 results.

          This section describes how to run and view the results for the following GlusterFS Top commands:

          • Viewing Open fd Count and Maximum fd Count
          • Viewing Highest File Read Calls
          • Viewing Highest File Write Calls
          • Viewing Highest Open Calls on Directories
          • Viewing Highest Read Calls on Directory
          • Viewing List of Read Performance on each Brick
          • Viewing List of Write Performance on each Brick

          "},{"location":"Administrator-Guide/Monitoring-Workload/#viewing-open-fd-count-and-maximum-fd-count","title":"Viewing Open fd Count and Maximum fd Count","text":"

           You can view both the current open fd count (the list of files that are currently the most opened, with their counts) on the brick, and the maximum open fd count (the maximum number of files open at any given point of time since the servers have been up and running). If the brick name is not specified, then the open fd metrics of all the bricks belonging to the volume will be displayed.

          • View open fd count and maximum fd count using the following command:

           # gluster volume top <VOLNAME> open [brick <BRICK-NAME>] [list-cnt <count>]

          For example, to view open fd count and maximum fd count on brick server:/export of test-volume and list top 10 open calls:

           # gluster volume top test-volume open brick server:/export list-cnt 10

          Brick: server:/export/dir1

          Current open fd's: 34 Max open fd's: 209

                         ==========Open file stats========\n\n  open            file name\n  call count\n\n  2               /clients/client0/~dmtmp/PARADOX/\n                  COURSES.DB\n\n  11              /clients/client0/~dmtmp/PARADOX/\n                  ENROLL.DB\n\n  11              /clients/client0/~dmtmp/PARADOX/\n                  STUDENTS.DB\n\n  10              /clients/client0/~dmtmp/PWRPNT/\n                  TIPS.PPT\n\n  10              /clients/client0/~dmtmp/PWRPNT/\n                  PCBENCHM.PPT\n\n  9               /clients/client7/~dmtmp/PARADOX/\n                  STUDENTS.DB\n\n  9               /clients/client1/~dmtmp/PARADOX/\n                  STUDENTS.DB\n\n  9               /clients/client2/~dmtmp/PARADOX/\n                  STUDENTS.DB\n\n  9               /clients/client0/~dmtmp/PARADOX/\n                  STUDENTS.DB\n\n  9               /clients/client8/~dmtmp/PARADOX/\n                  STUDENTS.DB\n

          "},{"location":"Administrator-Guide/Monitoring-Workload/#viewing-highest-file-read-calls","title":"Viewing Highest File Read Calls","text":"

           You can view the highest read calls on each brick. If the brick name is not specified, then by default a list of 100 files will be displayed.

          • View highest file Read calls using the following command:

           # gluster volume top <VOLNAME> read [brick <BRICK-NAME>] [list-cnt <count>]

          For example, to view highest Read calls on brick server:/export of test-volume:

           # gluster volume top test-volume read brick server:/export list-cnt 10

          Brick: server:/export/dir1

                      ==========Read file stats========\n\n  read              filename\n  call count\n\n  116              /clients/client0/~dmtmp/SEED/LARGE.FIL\n\n  64               /clients/client0/~dmtmp/SEED/MEDIUM.FIL\n\n  54               /clients/client2/~dmtmp/SEED/LARGE.FIL\n\n  54               /clients/client6/~dmtmp/SEED/LARGE.FIL\n\n  54               /clients/client5/~dmtmp/SEED/LARGE.FIL\n\n  54               /clients/client0/~dmtmp/SEED/LARGE.FIL\n\n  54               /clients/client3/~dmtmp/SEED/LARGE.FIL\n\n  54               /clients/client4/~dmtmp/SEED/LARGE.FIL\n\n  54               /clients/client9/~dmtmp/SEED/LARGE.FIL\n\n  54               /clients/client8/~dmtmp/SEED/LARGE.FIL\n

          "},{"location":"Administrator-Guide/Monitoring-Workload/#viewing-highest-file-write-calls","title":"Viewing Highest File Write Calls","text":"

           You can view the list of files which have the highest file write calls on each brick. If the brick name is not specified, then by default a list of 100 files will be displayed.

          • View highest file Write calls using the following command:

           # gluster volume top <VOLNAME> write [brick <BRICK-NAME>] [list-cnt <count>]

          For example, to view highest Write calls on brick server:/export of test-volume:

           # gluster volume top test-volume write brick server:/export list-cnt 10

          Brick: server:/export/dir1

                           ==========Write file stats========\n  write call count   filename\n\n  83                /clients/client0/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client7/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client1/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client2/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client0/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client8/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client5/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client4/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client6/~dmtmp/SEED/LARGE.FIL\n\n  59                /clients/client3/~dmtmp/SEED/LARGE.FIL\n

          "},{"location":"Administrator-Guide/Monitoring-Workload/#viewing-highest-open-calls-on-directories","title":"Viewing Highest Open Calls on Directories","text":"

           You can view the list of directories which have the highest open calls on each brick. If the brick name is not specified, then the metrics of all the bricks belonging to that volume will be displayed.

          • View list of open calls on each directory using the following command:

           # gluster volume top <VOLNAME> opendir [brick <BRICK-NAME>] [list-cnt <count>]

          For example, to view open calls on brick server:/export/ of test-volume:

           # gluster volume top test-volume opendir brick server:/export list-cnt 10

          Brick: server:/export/dir1

                     ==========Directory open stats========\n\n  Opendir count     directory name\n\n  1001              /clients/client0/~dmtmp\n\n  454               /clients/client8/~dmtmp\n\n  454               /clients/client2/~dmtmp\n\n  454               /clients/client6/~dmtmp\n\n  454               /clients/client5/~dmtmp\n\n  454               /clients/client9/~dmtmp\n\n  443               /clients/client0/~dmtmp/PARADOX\n\n  408               /clients/client1/~dmtmp\n\n  408               /clients/client7/~dmtmp\n\n  402               /clients/client4/~dmtmp\n

          "},{"location":"Administrator-Guide/Monitoring-Workload/#viewing-highest-read-calls-on-directory","title":"Viewing Highest Read Calls on Directory","text":"

           You can view the list of directories which have the highest read calls on each brick. If the brick name is not specified, then the metrics of all the bricks belonging to that volume will be displayed.

          • View list of highest directory read calls on each brick using the following command:

          # gluster volume top test-volume readdir [brick BRICK] [list-cnt {0..100}]

          For example, to view highest directory read calls on brick server:/export of test-volume:

          # gluster volume top test-volume readdir brick server:/export list-cnt 10

           Brick: server:/export

            ==========Directory readdirp stats========\n\n  readdirp count           directory name\n\n  1996                    /clients/client0/~dmtmp\n\n  1083                    /clients/client0/~dmtmp/PARADOX\n\n  904                     /clients/client8/~dmtmp\n\n  904                     /clients/client2/~dmtmp\n\n  904                     /clients/client6/~dmtmp\n\n  904                     /clients/client5/~dmtmp\n\n  904                     /clients/client9/~dmtmp\n\n  812                     /clients/client1/~dmtmp\n\n  812                     /clients/client7/~dmtmp\n\n  800                     /clients/client4/~dmtmp\n

          "},{"location":"Administrator-Guide/Monitoring-Workload/#viewing-list-of-read-performance-on-each-brick","title":"Viewing List of Read Performance on each Brick","text":"

           You can view the read throughput of files on each brick. If the brick name is not specified, then the metrics of all the bricks belonging to that volume will be displayed. The output will be the read throughput.

                 ==========Read throughput file stats========\n\nread         filename                         Time\nthrough\nput(MBp\ns)\n\n2570.00    /clients/client0/~dmtmp/PWRPNT/      -2011-01-31\n           TRIDOTS.POT                      15:38:36.894610\n2570.00    /clients/client0/~dmtmp/PWRPNT/      -2011-01-31\n           PCBENCHM.PPT                     15:38:39.815310\n2383.00    /clients/client2/~dmtmp/SEED/        -2011-01-31\n           MEDIUM.FIL                       15:52:53.631499\n\n2340.00    /clients/client0/~dmtmp/SEED/        -2011-01-31\n           MEDIUM.FIL                       15:38:36.926198\n\n2299.00   /clients/client0/~dmtmp/SEED/         -2011-01-31\n          LARGE.FIL                         15:38:36.930445\n\n2259.00   /clients/client0/~dmtmp/PARADOX/      -2011-01-31\n          COURSES.X04                       15:38:40.549919\n\n2221.00   /clients/client0/~dmtmp/PARADOX/      -2011-01-31\n          STUDENTS.VAL                      15:52:53.298766\n\n2221.00   /clients/client3/~dmtmp/SEED/         -2011-01-31\n          COURSES.DB                        15:39:11.776780\n\n2184.00   /clients/client3/~dmtmp/SEED/         -2011-01-31\n          MEDIUM.FIL                        15:39:10.251764\n\n2184.00   /clients/client5/~dmtmp/WORD/         -2011-01-31\n          BASEMACH.DOC                      15:39:09.336572\n

           This command will initiate a dd for the specified count and block size and measure the corresponding throughput.

          • View list of read performance on each brick using the following command:

           # gluster volume top <VOLNAME> read-perf [bs <blk-size> count <count>] [brick <BRICK-NAME>] [list-cnt <count>]

           For example, to view the read performance on brick server:/export/ of test-volume, with a 256-byte block size, a count of 1, and a list count of 10:

           # gluster volume top test-volume read-perf bs 256 count 1 brick server:/export/ list-cnt 10

          Brick: server:/export/dir1 256 bytes (256 B) copied, Throughput: 4.1 MB/s

                   ==========Read throughput file stats========\n\n  read         filename                         Time\n  through\n  put(MBp\n  s)\n\n  2912.00   /clients/client0/~dmtmp/PWRPNT/    -2011-01-31\n             TRIDOTS.POT                   15:38:36.896486\n\n  2570.00   /clients/client0/~dmtmp/PWRPNT/    -2011-01-31\n             PCBENCHM.PPT                  15:38:39.815310\n\n  2383.00   /clients/client2/~dmtmp/SEED/      -2011-01-31\n             MEDIUM.FIL                    15:52:53.631499\n\n  2340.00   /clients/client0/~dmtmp/SEED/      -2011-01-31\n             MEDIUM.FIL                    15:38:36.926198\n\n  2299.00   /clients/client0/~dmtmp/SEED/      -2011-01-31\n             LARGE.FIL                     15:38:36.930445\n\n  2259.00  /clients/client0/~dmtmp/PARADOX/    -2011-01-31\n            COURSES.X04                    15:38:40.549919\n\n  2221.00  /clients/client9/~dmtmp/PARADOX/    -2011-01-31\n            STUDENTS.VAL                   15:52:53.298766\n\n  2221.00  /clients/client8/~dmtmp/PARADOX/    -2011-01-31\n           COURSES.DB                      15:39:11.776780\n\n  2184.00  /clients/client3/~dmtmp/SEED/       -2011-01-31\n            MEDIUM.FIL                     15:39:10.251764\n\n  2184.00  /clients/client5/~dmtmp/WORD/       -2011-01-31\n           BASEMACH.DOC                    15:39:09.336572\n

          "},{"location":"Administrator-Guide/Monitoring-Workload/#viewing-list-of-write-performance-on-each-brick","title":"Viewing List of Write Performance on each Brick","text":"

           You can view the write throughput of files on each brick. If the brick name is not specified, then the metrics of all the bricks belonging to that volume will be displayed. The output will be the write throughput.

           This command will initiate a dd for the specified count and block size and measure the corresponding throughput. To view the list of write performance on each brick:

          • View list of write performance on each brick using the following command:

           # gluster volume top <VOLNAME> write-perf [bs <blk-size> count <count>] [brick <BRICK-NAME>] [list-cnt <count>]

           For example, to view the write performance on brick server:/export/ of test-volume, with a 256-byte block size, a count of 1, and a list count of 10:

           # gluster volume top test-volume write-perf bs 256 count 1 brick server:/export/ list-cnt 10

          Brick: server:/export/dir1

          256 bytes (256 B) copied, Throughput: 2.8 MB/s

                   ==========Write throughput file stats========\n\n  write                filename                 Time\n  throughput\n  (MBps)\n\n  1170.00    /clients/client0/~dmtmp/SEED/     -2011-01-31\n             SMALL.FIL                     15:39:09.171494\n\n  1008.00    /clients/client6/~dmtmp/SEED/     -2011-01-31\n             LARGE.FIL                      15:39:09.73189\n\n  949.00    /clients/client0/~dmtmp/SEED/      -2011-01-31\n            MEDIUM.FIL                     15:38:36.927426\n\n  936.00   /clients/client0/~dmtmp/SEED/       -2011-01-31\n           LARGE.FIL                        15:38:36.933177\n  897.00   /clients/client5/~dmtmp/SEED/       -2011-01-31\n           MEDIUM.FIL                       15:39:09.33628\n\n  897.00   /clients/client6/~dmtmp/SEED/       -2011-01-31\n           MEDIUM.FIL                       15:39:09.27713\n\n  885.00   /clients/client0/~dmtmp/SEED/       -2011-01-31\n            SMALL.FIL                      15:38:36.924271\n\n  528.00   /clients/client5/~dmtmp/SEED/       -2011-01-31\n           LARGE.FIL                        15:39:09.81893\n\n  516.00   /clients/client6/~dmtmp/ACCESS/    -2011-01-31\n           FASTENER.MDB                    15:39:01.797317\n
          "},{"location":"Administrator-Guide/Monitoring-Workload/#displaying-volume-information","title":"Displaying Volume Information","text":"

          You can display information about a specific volume, or all volumes, as needed.

          • Display information about a specific volume using the following command:

          # gluster volume info VOLNAME

          For example, to display information about test-volume:

            # gluster volume info test-volume\n  Volume Name: test-volume\n  Type: Distribute\n  Status: Created\n  Number of Bricks: 4\n  Bricks:\n  Brick1: server1:/exp1\n  Brick2: server2:/exp2\n  Brick3: server3:/exp3\n  Brick4: server4:/exp4\n
          • Display information about all volumes using the following command:

          # gluster volume info all

            # gluster volume info all\n\n  Volume Name: test-volume\n  Type: Distribute\n  Status: Created\n  Number of Bricks: 4\n  Bricks:\n  Brick1: server1:/exp1\n  Brick2: server2:/exp2\n  Brick3: server3:/exp3\n  Brick4: server4:/exp4\n\n  Volume Name: mirror\n  Type: Distributed-Replicate\n  Status: Started\n  Number of Bricks: 2 X 2 = 4\n  Bricks:\n  Brick1: server1:/brick1\n  Brick2: server2:/brick2\n  Brick3: server3:/brick3\n  Brick4: server4:/brick4\n\n  Volume Name: Vol\n  Type: Distribute\n  Status: Started\n  Number of Bricks: 1\n  Bricks:\n  Brick: server:/brick6\n
          "},{"location":"Administrator-Guide/Monitoring-Workload/#performing-statedump-on-a-volume","title":"Performing Statedump on a Volume","text":"

           Statedump is a mechanism through which you can get details of all internal variables and the state of the glusterfs process at the time of issuing the command. You can perform statedumps of the brick processes and the NFS server process of a volume using the statedump command. The following options can be used to determine what information is to be dumped:

          • mem - Dumps the memory usage and memory pool details of the bricks.

          • iobuf - Dumps iobuf details of the bricks.

          • priv - Dumps private information of loaded translators.

          • callpool - Dumps the pending calls of the volume.

          • fd - Dumps the open fd tables of the volume.

          • inode - Dumps the inode tables of the volume.

          To display volume statedump

          • Display statedump of a volume or NFS server using the following command:

           # gluster volume statedump <VOLNAME> [nfs] [all|mem|iobuf|callpool|priv|fd|inode]

          For example, to display statedump of test-volume:

            # gluster volume statedump test-volume\n  Volume statedump successful\n

           The statedump files are created on the brick servers in the /tmp directory or in the directory set using the server.statedump-path volume option. The naming convention of the dump file is <brick-path>.<brick-pid>.dump.
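
           For example, a small sketch of dumping only the memory details of test-volume and then locating the dump files on a brick server (the dump file name in the comment is hypothetical; the actual name depends on the brick path and PID):

             # gluster volume statedump test-volume mem\n  Volume statedump successful\n  # ls /tmp/*.dump   # e.g. a file such as exports-brick1.23456.dump (hypothetical)\n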

           • By default, the output of the statedump is stored in the /tmp/<brickname.PID.dump> file on that particular server. Change the directory of the statedump file using the following command:

           # gluster volume set <VOLNAME> server.statedump-path <path>

          For example, to change the location of the statedump file of test-volume:

            # gluster volume set test-volume server.statedump-path /usr/local/var/log/glusterfs/dumps/\n  Set volume successful\n

          You can view the changed path of the statedump file using the following command:

          # gluster volume info

          "},{"location":"Administrator-Guide/Monitoring-Workload/#displaying-volume-status","title":"Displaying Volume Status","text":"

           You can display status information about a specific volume, a brick, or all volumes, as needed. Status information can be used to understand the current status of the brick, NFS processes, and the overall file system. Status information can also be used to monitor and debug a volume. You can view the status of the volume along with the following details:

          • detail - Displays additional information about the bricks.

          • clients - Displays the list of clients connected to the volume.

          • mem - Displays the memory usage and memory pool details of the bricks.

          • inode - Displays the inode tables of the volume.

          • fd - Displays the open fd (file descriptors) tables of the volume.

          • callpool - Displays the pending calls of the volume.

          To display volume status

          • Display information about a specific volume using the following command:

           # gluster volume status [all|<VOLNAME> [<BRICK>]] [detail|clients|mem|inode|fd|callpool]

          For example, to display information about test-volume:

            # gluster volume status test-volume\n  STATUS OF VOLUME: test-volume\n  BRICK                           PORT   ONLINE   PID\n  --------------------------------------------------------\n  arch:/export/1                  24009   Y       22445\n  --------------------------------------------------------\n  arch:/export/2                  24010   Y       22450\n
          • Display information about all volumes using the following command:

          # gluster volume status all

            # gluster volume status all\n  STATUS OF VOLUME: volume-test\n  BRICK                           PORT   ONLINE   PID\n  --------------------------------------------------------\n  arch:/export/4                  24010   Y       22455\n\n  STATUS OF VOLUME: test-volume\n  BRICK                           PORT   ONLINE   PID\n  --------------------------------------------------------\n  arch:/export/1                  24009   Y       22445\n  --------------------------------------------------------\n  arch:/export/2                  24010   Y       22450\n
          • Display additional information about the bricks using the following command:

           # gluster volume status <VOLNAME> detail

          For example, to display additional information about the bricks of test-volume:

            # gluster volume status test-volume detail\n  STATUS OF VOLUME: test-volume\n  -------------------------------------------\n  Brick                : arch:/export/1\n  Port                 : 24009\n  Online               : Y\n  Pid                  : 16977\n  File System          : rootfs\n  Device               : rootfs\n  Mount Options        : rw\n  Disk Space Free      : 13.8GB\n  Total Disk Space     : 46.5GB\n  Inode Size           : N/A\n  Inode Count          : N/A\n  Free Inodes          : N/A\n\n  Number of Bricks: 1\n  Bricks:\n  Brick: server:/brick6\n
          • Display the list of clients accessing the volumes using the following command:

          # gluster volume status test-volume clients

          For example, to display the list of clients connected to test-volume:

            # gluster volume status test-volume clients\n  Brick : arch:/export/1\n  Clients connected : 2\n  Hostname          Bytes Read   BytesWritten\n  --------          ---------    ------------\n  127.0.0.1:1013    776          676\n  127.0.0.1:1012    50440        51200\n
          • Display the memory usage and memory pool details of the bricks using the following command:

          # gluster volume status test-volume mem

          For example, to display the memory usage and memory pool details of the bricks of test-volume:

            Memory status for volume : test-volume\n  ----------------------------------------------\n  Brick : arch:/export/1\n  Mallinfo\n  --------\n  Arena    : 434176\n  Ordblks  : 2\n  Smblks   : 0\n  Hblks    : 12\n  Hblkhd   : 40861696\n  Usmblks  : 0\n  Fsmblks  : 0\n  Uordblks : 332416\n  Fordblks : 101760\n  Keepcost : 100400\n\n  Mempool Stats\n  -------------\n  Name                               HotCount ColdCount PaddedSizeof AllocCount MaxAlloc\n  ----                               -------- --------- ------------ ---------- --------\n  test-volume-server:fd_t                0     16384           92         57        5\n  test-volume-server:dentry_t           59       965           84         59       59\n  test-volume-server:inode_t            60       964          148         60       60\n  test-volume-server:rpcsvc_request_t    0       525         6372        351        2\n  glusterfs:struct saved_frame           0      4096          124          2        2\n  glusterfs:struct rpc_req               0      4096         2236          2        2\n  glusterfs:rpcsvc_request_t             1       524         6372          2        1\n  glusterfs:call_stub_t                  0      1024         1220        288        1\n  glusterfs:call_stack_t                 0      8192         2084        290        2\n  glusterfs:call_frame_t                 0     16384          172       1728        6\n
          • Display the inode tables of the volume using the following command:

           # gluster volume status <VOLNAME> inode

          For example, to display the inode tables of the test-volume:

            # gluster volume status test-volume inode\n  inode tables for volume test-volume\n  ----------------------------------------------\n  Brick : arch:/export/1\n  Active inodes:\n  GFID                                            Lookups            Ref   IA type\n  ----                                            -------            ---   -------\n  6f3fe173-e07a-4209-abb6-484091d75499                  1              9         2\n  370d35d7-657e-44dc-bac4-d6dd800ec3d3                  1              1         2\n\n  LRU inodes:\n  GFID                                            Lookups            Ref   IA type\n  ----                                            -------            ---   -------\n  80f98abe-cdcf-4c1d-b917-ae564cf55763                  1              0         1\n  3a58973d-d549-4ea6-9977-9aa218f233de                  1              0         1\n  2ce0197d-87a9-451b-9094-9baa38121155                  1              0         2\n
          • Display the open fd tables of the volume using the following command:

           # gluster volume status <VOLNAME> fd

          For example, to display the open fd tables of the test-volume:

            # gluster volume status test-volume fd\n\n  FD tables for volume test-volume\n  ----------------------------------------------\n  Brick : arch:/export/1\n  Connection 1:\n  RefCount = 0  MaxFDs = 128  FirstFree = 4\n  FD Entry            PID                 RefCount            Flags\n  --------            ---                 --------            -----\n  0                   26311               1                   2\n  1                   26310               3                   2\n  2                   26310               1                   2\n  3                   26311               3                   2\n\n  Connection 2:\n  RefCount = 0  MaxFDs = 128  FirstFree = 0\n  No open fds\n\n  Connection 3:\n  RefCount = 0  MaxFDs = 128  FirstFree = 0\n  No open fds\n
          • Display the pending calls of the volume using the following command:

           # gluster volume status <VOLNAME> callpool

          Each call has a call stack containing call frames.

          For example, to display the pending calls of test-volume:

            # gluster volume status test-volume\n\n  Pending calls for volume test-volume\n  ----------------------------------------------\n  Brick : arch:/export/1\n  Pending calls: 2\n  Call Stack1\n   UID    : 0\n   GID    : 0\n   PID    : 26338\n   Unique : 192138\n   Frames : 7\n   Frame 1\n    Ref Count   = 1\n    Translator  = test-volume-server\n    Completed   = No\n   Frame 2\n    Ref Count   = 0\n    Translator  = test-volume-posix\n    Completed   = No\n    Parent      = test-volume-access-control\n    Wind From   = default_fsync\n    Wind To     = FIRST_CHILD(this)->fops->fsync\n   Frame 3\n    Ref Count   = 1\n    Translator  = test-volume-access-control\n    Completed   = No\n    Parent      = repl-locks\n    Wind From   = default_fsync\n    Wind To     = FIRST_CHILD(this)->fops->fsync\n   Frame 4\n    Ref Count   = 1\n    Translator  = test-volume-locks\n    Completed   = No\n    Parent      = test-volume-io-threads\n    Wind From   = iot_fsync_wrapper\n    Wind To     = FIRST_CHILD (this)->fops->fsync\n   Frame 5\n    Ref Count   = 1\n    Translator  = test-volume-io-threads\n    Completed   = No\n    Parent      = test-volume-marker\n    Wind From   = default_fsync\n    Wind To     = FIRST_CHILD(this)->fops->fsync\n   Frame 6\n    Ref Count   = 1\n    Translator  = test-volume-marker\n    Completed   = No\n    Parent      = /export/1\n    Wind From   = io_stats_fsync\n    Wind To     = FIRST_CHILD(this)->fops->fsync\n   Frame 7\n    Ref Count   = 1\n    Translator  = /export/1\n    Completed   = No\n    Parent      = test-volume-server\n    Wind From   = server_fsync_resume\n    Wind To     = bound_xl->fops->fsync\n
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/","title":"Configuring NFS-Ganesha over GlusterFS","text":"

           NFS-Ganesha is a user-space file server for the NFS protocol with support for NFSv3, v4, v4.1, and pNFS. It provides a FUSE-compatible File System Abstraction Layer (FSAL) to allow file-system developers to plug in their own storage mechanism and access it from any NFS client. NFS-Ganesha can access the FUSE filesystems directly through its FSAL without copying any data to or from the kernel, thus potentially improving response times.

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#installing-nfs-ganesha","title":"Installing nfs-ganesha","text":""},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#gluster-rpms-310","title":"Gluster RPMs (>= 3.10)","text":"

          glusterfs-server glusterfs-api glusterfs-ganesha

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#ganesha-rpms-25","title":"Ganesha RPMs (>= 2.5)","text":"

          nfs-ganesha nfs-ganesha-gluster
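
           For example, on a yum-based distribution the packages listed above could be installed as follows (a sketch only; exact package and repository names vary by distribution and by Gluster/Ganesha version):

           # yum install glusterfs-server glusterfs-api glusterfs-ganesha\n # yum install nfs-ganesha nfs-ganesha-gluster\n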

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#start-nfs-ganesha-manually","title":"Start NFS-Ganesha manually","text":"
          • To start NFS-Ganesha manually, use the command: service nfs-ganesha start
          where:\n/var/log/ganesha.log is the default log file for the ganesha process.\n/etc/ganesha/ganesha.conf is the default configuration file\nNIV_EVENT is the default log level.\n
          • If the user wants to run ganesha in a preferred mode, execute the following command : ganesha.nfsd -f <location_of_nfs-ganesha.conf_file> -L <location_of_log_file> -N <log_level>
          For example:\n#ganesha.nfsd -f nfs-ganesha.conf -L nfs-ganesha.log -N NIV_DEBUG\nwhere:\nnfs-ganesha.log is the log file for the ganesha.nfsd process.\nnfs-ganesha.conf is the configuration file\nNIV_DEBUG is the log level.\n
          • By default, the export list for the server will be Null
           Note: include the following parameters in the ganesha configuration file for exporting gluster volumes\nNFS_Core_Param {\n        #Use the supplied name rather than the IP in NSM operations\n        NSM_Use_Caller_Name = true;\n        #Copy lock states into \"/var/lib/nfs/ganesha\" dir\n        Clustered = false;\n        #Use a non-privileged port for RQuota\n        Rquota_Port = 875;\n    #please note: add the below option for Mac clients\n    #Enable_RQUOTA = false;\n}\n
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#step-by-step-procedures-to-exporting-glusterfs-volume-via-nfs-ganesha","title":"Step by step procedures to exporting GlusterFS volume via NFS-Ganesha","text":""},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#step-1","title":"step 1 :","text":"

          To export any GlusterFS volume or directory inside a volume, create the EXPORT block for each of those entries in an export configuration file. The following parameters are required to export any entry.

          • cat export.conf
          EXPORT{\n    Export_Id = 1 ;   # Export ID unique to each export\n    Path = \"volume_path\";  # Path of the volume to be exported. Eg: \"/test_volume\"\n\n    FSAL {\n        name = GLUSTER;\n        hostname = \"10.xx.xx.xx\";  # IP of one of the nodes in the trusted pool\n        volume = \"volume_name\";  # Volume name. Eg: \"test_volume\"\n    }\n\n    Access_type = RW;    # Access permissions\n    Squash = No_root_squash; # To enable/disable root squashing\n    Disable_ACL = TRUE;  # To enable/disable ACL\n    Pseudo = \"pseudo_path\";  # NFSv4 pseudo path for this export. Eg: \"/test_volume_pseudo\"\n    Protocols = \"3\",\"4\" ;    # NFS protocols supported\n    Transports = \"UDP\",\"TCP\" ; # Transport protocols supported\n    SecType = \"sys\";     # Security flavors supported\n}\n
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#step-2","title":"step 2 :","text":"

           Now include the export configuration file in the ganesha configuration file (by default /etc/ganesha/ganesha.conf). This can be done by adding the line below at the end of the file.

           • %include \"<path of export configuration>\"
           Note:\nThe above two steps can be done with the following script:\n#/usr/libexec/ganesha/create-export-ganesha.sh <ganesha directory> on <volume name>\nBy default the ganesha directory is \"/etc/ganesha\"\nThis will create the export configuration file in <ganesha directory>/exports/export.<volume name>.conf\nAlso, it will add the above entry to ganesha.conf\n
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#step-3","title":"step 3 :","text":"

          Turn on features.cache-invalidation for that volume

          • gluster volume set \\<volume name\\> features.cache-invalidation on
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#step-4","title":"step 4 :","text":"

           dbus commands are used to export/unexport a volume.

          • export

          • dbus-send --system --print-reply --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport string:<ganesha directory>/exports/export.<volume name>.conf string:\"EXPORT(Path=/<volume name>)\"

          • unexport

          • dbus-send --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.RemoveExport uint16:<export id>
          Note :\nStep 4 can be performed via following script\n#/usr/libexec/ganesha/dbus-send.sh <ganesha directory> [on|off] <volume name>\n

           The above scripts (mentioned in step 3 and step 4) are available in the glusterfs 3.10 rpms.

          You can download it from here

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#step-5","title":"step 5 :","text":"
          • To check if the volume is exported, run
          • showmount -e localhost
          • Or else use the following dbus command
          • dbus-send --type=method_call --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.ShowExports
          • To see clients
          • dbus-send --type=method_call --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/ClientMgr org.ganesha.nfsd.clientmgr.ShowClients
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#using-highly-available-active-active-nfs-ganesha-and-glusterfs-cli","title":"Using Highly Available Active-Active NFS-Ganesha And GlusterFS cli","text":"

           Please note: currently the HA solution for nfs-ganesha is available in 3.10. From 3.12 onwards, HA will be handled by a different project known as storhaug, which is under development.

          In a highly available active-active environment, if an NFS-Ganesha server that is connected to an NFS client running a particular application crashes, the application/NFS client is seamlessly connected to another NFS-Ganesha server without any administrative intervention. The cluster is maintained using Pacemaker and Corosync. Pacemaker acts as a resource manager and Corosync provides the communication layer of the cluster. Data coherency across the multi-head NFS-Ganesha servers in the cluster is achieved using the UPCALL infrastructure. UPCALL infrastructure is a generic and extensible framework that sends notifications to the respective glusterfs clients (in this case NFS-Ganesha server) in case of any changes detected in the backend filesystem.

          The Highly Available cluster is configured in the following three stages:

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#creating-the-ganesha-haconf-file","title":"Creating the ganesha-ha.conf file","text":"

           The ganesha-ha.conf.example file is created in /etc/ganesha when Gluster Storage is installed. Rename the file to ganesha-ha.conf and make the changes as suggested in the following sample ganesha-ha.conf file:

           # Name of the HA cluster created. must be unique within the subnet\nHA_NAME=\"ganesha-ha-360\"\n# The subset of nodes of the Gluster Trusted Pool that form the ganesha HA cluster.\n# Hostname is specified.\nHA_CLUSTER_NODES=\"server1,server2,...\"\n#HA_CLUSTER_NODES=\"server1.lab.redhat.com,server2.lab.redhat.com,...\"\n# Virtual IPs for each of the nodes specified above.\nVIP_server1=\"10.0.2.1\"\nVIP_server2=\"10.0.2.2\"\n

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#configuring-nfs-ganesha-using-gluster-cli","title":"Configuring NFS-Ganesha using gluster CLI","text":"

          The HA cluster can be set up or torn down using gluster CLI. Also, it can export and unexport specific volumes. For more information, see section Configuring NFS-Ganesha using gluster CLI.

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#modifying-the-ha-cluster-using-the-ganesha-hash-script","title":"Modifying the HA cluster using the ganesha-ha.sh script","text":"

          Post the cluster creation any further modification can be done using the ganesha-ha.sh script. For more information, see the section Modifying the HA cluster using the ganesha-ha.sh script.

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#step-by-step-guide","title":"Step-by-step guide","text":""},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#configuring-nfs-ganesha-using-gluster-cli_1","title":"Configuring NFS-Ganesha using Gluster CLI\u2060","text":""},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#pre-requisites-to-run-nfs-ganesha","title":"Pre-requisites to run NFS-Ganesha","text":"

          Ensure that the following pre-requisites are taken into consideration before you run NFS-Ganesha in your environment:

          • A Gluster Storage volume must be available for export and NFS-Ganesha rpms are installed on all the nodes.
          • IPv6 must be enabled on the host interface which is used by the NFS-Ganesha daemon. To enable IPv6 support, perform the following steps:

          • Comment or remove the line options ipv6 disable=1 in the /etc/modprobe.d/ipv6.conf file.

          • Reboot the system.

          • Ensure that all the nodes in the cluster are DNS resolvable. For example, you can populate the /etc/hosts with the details of all the nodes in the cluster.

          • Disable and stop NetworkManager service.
          • Enable and start network service on all machines.
          • Create and mount a gluster shared volume.
          • gluster volume set all cluster.enable-shared-storage enable
          • Install Pacemaker and Corosync on all machines.
          • Set the cluster auth password on all the machines.
          • Passwordless ssh needs to be enabled on all the HA nodes. Follow these steps:

          • On one (primary) node in the cluster, run:

            • ssh-keygen -f /var/lib/glusterd/nfs/secret.pem
          • Deploy the public key to ~root/.ssh/authorized_keys on all nodes by running:
            • ssh-copy-id -i /var/lib/glusterd/nfs/secret.pem.pub root@$node
          • Copy the keys to all nodes in the cluster, run:

            • scp /var/lib/glusterd/nfs/secret.\\* $node:/var/lib/glusterd/nfs/
          • Create a directory named \"nfs-ganesha\" in the shared storage path and create ganesha.conf and ganesha-ha.conf in it (required from glusterfs 3.9 onwards). A consolidated sketch of these prerequisite steps follows this list.
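          The following is a minimal, illustrative sketch consolidating the SSH and shared-storage prerequisites above, assuming a hypothetical four-node cluster (server1 to server4); the shared-storage mount point /run/gluster/shared_storage is an assumption and may differ on your system. Adjust names and paths to your environment.

              # Run on one (primary) node of the trusted pool
              gluster volume set all cluster.enable-shared-storage enable

              # Generate the key pair used by the HA scripts
              ssh-keygen -f /var/lib/glusterd/nfs/secret.pem

              # Distribute the public key and the key pair to every HA node
              for node in server1 server2 server3 server4; do
                  ssh-copy-id -i /var/lib/glusterd/nfs/secret.pem.pub root@$node
                  scp /var/lib/glusterd/nfs/secret.* $node:/var/lib/glusterd/nfs/
              done

              # From glusterfs 3.9 onwards: keep the ganesha config in shared storage
              mkdir -p /run/gluster/shared_storage/nfs-ganesha
              cp /etc/ganesha/ganesha.conf /etc/ganesha/ganesha-ha.conf \
                  /run/gluster/shared_storage/nfs-ganesha/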

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#configuring-the-ha-cluster","title":"Configuring the HA Cluster","text":"

          To set up the HA cluster, enable NFS-Ganesha by executing the following command:

          gluster nfs-ganesha enable\n

          To tear down the HA cluster, execute the following command:

          gluster nfs-ganesha disable\n
          Note:\nThe enable command performs the following steps:\n* creates a symlink ganesha.conf in /etc/ganesha pointing to the ganesha.conf in shared storage\n* starts the nfs-ganesha process on the nodes that are part of the ganesha cluster\n* sets up the HA cluster\nThe disable command reverses these steps.\nIf gluster nfs-ganesha [enable/disable] fails, check the following logs:\n* /var/log/glusterfs/glusterd.log\n* /var/log/messages (and grep for pcs commands)\n* /var/log/pcsd/pcsd.log\n
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#exporting-volumes-through-nfs-ganesha-using-cli","title":"Exporting Volumes through NFS-Ganesha using cli","text":"

          To export a Red Hat Gluster Storage volume, execute the following command:

          gluster volume set <volname> ganesha.enable on\n

          To unexport a Red Hat Gluster Storage volume, execute the following command:

          gluster volume set <volname> ganesha.enable off\n

          This command unexports the Red Hat Gluster Storage volume without affecting other exports.

          To verify the status of the volume set options, follow the guidelines mentioned below:

          • Check if NFS-Ganesha is started by executing the following command:
          • ps aux | grep ganesha.nfsd
          • Check if the volume is exported.
          • showmount -e localhost

          The logs of the ganesha.nfsd daemon are written to /var/log/ganesha.log. Check the log file if you notice any unexpected behavior.

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#modifying-the-ha-cluster-using-the-ganesha-hash-script_1","title":"Modifying the HA cluster using the ganesha-ha.sh script","text":"

          To modify the existing HA cluster and to change the default values of the exports, use the ganesha-ha.sh script located at /usr/libexec/ganesha/.

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#adding-a-node-to-the-cluster","title":"Adding a node to the cluster","text":"

          Before adding a node to the cluster, ensure that all the prerequisites mentioned in section Pre-requisites to run NFS-Ganesha are met. To add a node to the cluster, execute the following command on any of the nodes in the existing NFS-Ganesha cluster:

          #./ganesha-ha.sh --add <HA_CONF_DIR> <HOSTNAME> <NODE-VIP>\nwhere,\nHA_CONF_DIR: The directory path containing the ganesha-ha.conf file.\nHOSTNAME: Hostname of the new node to be added\nNODE-VIP: Virtual IP of the new node to be added.\n
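          For illustration only, assuming the ganesha-ha.conf file lives in the shared-storage nfs-ganesha directory and the new node is server5.example.com with virtual IP 10.0.2.5 (both hypothetical), the invocation might look like:

              ./ganesha-ha.sh --add /run/gluster/shared_storage/nfs-ganesha server5.example.com 10.0.2.5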
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#deleting-a-node-in-the-cluster","title":"Deleting a node in the cluster","text":"

          To delete a node from the cluster, execute the following command on any of the nodes in the existing NFS-Ganesha cluster:

          #./ganesha-ha.sh --delete <HA_CONF_DIR> <HOSTNAME>\n\nwhere,\nHA_CONF_DIR: The directory path containing the ganesha-ha.conf file.\nHOSTNAME: Hostname of the node to be deleted\n
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#modifying-the-default-export-configuration","title":"Modifying the default export configuration","text":"

          To modify the default export configurations, perform the following steps on any of the nodes in the existing ganesha cluster:

          • Edit/add the required fields in the corresponding export file located at /etc/ganesha/exports.

          • Execute the following command:

              #./ganesha-ha.sh --refresh-config <HA_CONF_DIR> <volname>\n\n  where,\n  HA_CONF_DIR: The directory path containing the ganesha-ha.conf file.\n  volname: The name of the volume whose export configuration has to be changed.\n

            Note: The export ID must not be changed.
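            As a hedged illustration (the file generated by your installation may differ in detail), an export file such as /etc/ganesha/exports/export.testvol.conf typically contains an EXPORT block along these lines, where testvol is a hypothetical volume name; fields such as Access_type or Squash are the kind you might edit, while the export ID stays as it is:

              EXPORT {
                  Export_Id = 2;               # must not be changed for an existing export
                  Path = "/testvol";
                  Pseudo = "/testvol";
                  Access_type = RW;            # example of a field you might edit
                  Squash = "No_root_squash";
                  Disable_ACL = true;
                  Protocols = "3", "4";
                  Transports = "UDP", "TCP";
                  SecType = "sys";
                  FSAL {
                      Name = GLUSTER;
                      Hostname = "localhost";
                      Volume = "testvol";
                  }
              }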


          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#configure-ganesha-ha-cluster-outside-of-gluster-nodes","title":"Configure ganesha ha cluster outside of gluster nodes","text":"

          Currently, ganesha HA cluster creation is tightly integrated with glusterd, so the user needs to create another trusted storage pool (TSP) using the ganesha nodes. Then create the ganesha HA cluster using the above-mentioned steps, up to and including \"gluster nfs-ganesha enable\". Exporting/unexporting should be performed without using the glusterd CLI (follow the manual steps; before performing step 4, replace localhost with the required hostname/IP in the \"hostname=localhost;\" entry of the export configuration file).

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#configuring-gluster-volume-for-pnfs","title":"Configuring Gluster volume for pNFS","text":"

          The Parallel Network File System (pNFS) is part of the NFS v4.1 protocol that allows computing clients to access storage devices directly and in parallel. The pNFS cluster consists of an MDS (Meta-Data Server) and one or more DSes (Data Servers). The client sends all read/write requests directly to the DS, and all other operations are handled by the MDS.

          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#step-by-step-guide_1","title":"Step by step guide","text":"
          • Turn on feature.cache-invalidation for the volume.

          • gluster v set <volname> features.cache-invalidation on

          • Select one of the nodes in the cluster as the MDS and configure it by adding the following block to the ganesha configuration file:

          GLUSTER\n{\n PNFS_MDS = true;\n}\n
          • Manually start NFS-Ganesha in every node in the cluster.

          • Check whether the volume is exported via nfs-ganesha in all the nodes.

          • showmount -e localhost

          • Mount the volume using the NFS version 4.1 protocol with the IP of the MDS; a concrete example follows this list.

          • mount -t nfs4 -o minorversion=1 <ip of MDS>:<volume name> <mount path>
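          For example, with a hypothetical MDS at 192.168.1.10 serving a volume named testvol:

              showmount -e 192.168.1.10
              mount -t nfs4 -o minorversion=1 192.168.1.10:/testvol /mnt/pnfs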
          "},{"location":"Administrator-Guide/NFS-Ganesha-GlusterFS-Integration/#points-to-be-noted","title":"Points to be Noted","text":"
          • The current architecture supports only a single MDS and multiple DSes. The server with which the client mounts will act as the MDS, and all servers, including the MDS, can act as DSes.

          • Currently, HA is not supported for pNFS (more specifically MDS). Although it is configurable, consistency is guaranteed across the cluster.

          • If any of the DSes goes down, then the MDS will handle those I/Os.

          • Hereafter, all subsequent NFS clients need to use the same server for mounting that volume via pNFS, i.e., more than one MDS per volume is not preferred.

          • pNFS support is only tested with distributed, replicated, or distribute-replicate volumes

          • It has been tested and verified with RHEL 6.5, Fedora 20, and Fedora 21 NFS clients. It is always better to use the latest NFS clients.

          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/","title":"Network Configurations Techniques","text":""},{"location":"Administrator-Guide/Network-Configurations-Techniques/#bonding-best-practices","title":"Bonding best practices","text":"

          Bonded network interfaces incorporate multiple physical interfaces into a single logical bonded interface with a single IP address. An N-way bonded interface can survive the loss of N-1 physical interfaces, and performance can be improved in some cases.

          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/#when-to-bond","title":"When to bond?","text":"
          • Need high availability for network link
          • Workload: sequential access to large files (most time spent reading/writing)
          • The network throughput limit of the client/server is much lower than the storage throughput limit
          • 1 GbE (almost always)
          • 10-Gbps links or faster -- for writes, replication doubles the load on the network and replicas are usually on different peers to which the client can transmit in parallel.
          • LIMITATION: Bonding mode 6 doesn't improve throughput if network peers are not on the same VLAN.
          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/#how-to-configure","title":"How to configure","text":"
          • Bonding-howto
          • The best bonding mode for a Gluster client is mode 6 (balance-alb); this allows the client to transmit writes in parallel on separate NICs much of the time. A peak throughput of 750 MB/s on writes from a single client was observed with bonding mode 6 on two 10-GbE NICs with jumbo frames. That's 1.5 GB/s of network traffic.
          • Another way to balance both transmit and receive traffic is bonding mode 4 (802.3ad), but this requires switch configuration (trunking commands).
          • Still another way to load balance is bonding mode 2 (balance-xor) with the option \"xmit_hash_policy=layer3+4\". Bonding modes 6 and 2 will not improve single-connection throughput, but they improve aggregate throughput across all connections.
          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/#jumbo-frames","title":"Jumbo frames","text":"

          Jumbo frames are Ethernet (or InfiniBand) frames with a size greater than the default of 1500 bytes (the InfiniBand default is around 2000 bytes). Increasing the frame size reduces the load on the operating system and hardware, which must process interrupts and protocol messages per frame.

          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/#when-to-configure","title":"When to configure?","text":"
          • Any network faster than 1-GbE
          • Workload is sequential large-file reads/writes
          • LIMITATION: All network switches in the VLAN must be configured to handle jumbo frames; do not configure jumbo frames otherwise.
          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/#how-to-configure_1","title":"How to configure?","text":"
          • Edit the network interface file at /etc/sysconfig/network-scripts/ifcfg-your-interface (a sample interface file follows this list)
          • Ethernet (on ixgbe driver): add \"MTU=9000\" (MTU means \"maximum transfer unit\") record to network interface file
          • Infiniband (on mlx4 driver): add \"CONNECTED_MODE=yes\" and \"MTU=65520\" records to network interface file
          • ifdown your-interface; ifup your-interface
          • Test with \"ping -s 16384 other-host-on-VLAN\"
          • The switch requires a max frame size larger than the MTU because of protocol headers, usually 9216 bytes
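          A minimal sketch of a jumbo-frame interface file, assuming an Ethernet interface named eth2 with an illustrative static address (your device name, addressing, and driver may differ):

              # /etc/sysconfig/network-scripts/ifcfg-eth2  (illustrative)
              DEVICE=eth2
              ONBOOT=yes
              BOOTPROTO=static
              IPADDR=10.10.10.11
              NETMASK=255.255.255.0
              MTU=9000

          After editing, restart the interface and verify with a large ping as described above: ifdown eth2; ifup eth2; ping -s 16384 other-host-on-VLAN.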
          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/#configuring-a-backend-network-for-storage","title":"Configuring a backend network for storage","text":"

          This method lets you add network capacity for multi-protocol sites by segregating traffic for different protocols on different network interfaces. This method can lower latency and improve throughput. For example, this method can keep self-heal and rebalancing traffic from competing with non-Gluster client traffic for a network interface, and will better support multi-stream I/O.

          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/#when-to-configure_1","title":"When to configure?","text":"
          • For non-Gluster services such as NFS, Swift (REST), CIFS being provided on Gluster servers. It will not help Gluster clients (external nodes with Gluster mountpoints on them).
          • Network port is over-utilized.
          "},{"location":"Administrator-Guide/Network-Configurations-Techniques/#how-to-configure_2","title":"How to configure?","text":"
          • Most network cards have multiple ports on them -- make port 1 the non-Gluster port and port 2 the Gluster port.
          • Separate Gluster ports onto a separate VLAN from non-Gluster ports, to simplify configuration (a sketch follows this list).
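          One possible sketch of this split (an illustration under assumptions, not the only approach): give each server a second hostname that resolves to its storage-VLAN address, and use those names when building the trusted pool and bricks, so that Gluster, self-heal, and rebalance traffic stays on the back-end port while NFS/CIFS/Swift clients use the front-end names. All names and addresses below are hypothetical.

              # /etc/hosts on servers (and Gluster clients)
              192.168.1.11   server1            # front-end: NFS, CIFS, Swift, management
              10.10.10.11    server1-storage    # back-end: Gluster traffic
              192.168.1.12   server2
              10.10.10.12    server2-storage

              # Build the pool and volume using the back-end names
              gluster peer probe server2-storage
              gluster volume create gvol replica 2 \
                  server1-storage:/bricks/brick1 server2-storage:/bricks/brick1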
          "},{"location":"Administrator-Guide/Object-Storage/","title":"SwiftOnFile","text":"

          The SwiftOnFile project enables a GlusterFS volume to be used as the backend for OpenStack Swift, a distributed object store. This allows objects PUT over Swift's RESTful API to be accessed as files over the filesystem interface and vice versa, i.e., files created over the filesystem interface (NFS/FUSE/native) can be accessed as objects over Swift's RESTful API.

          The SwiftOnFile project was formerly known as gluster-swift, and before that as UFO (Unified File and Object). More information about SwiftOnFile can be found here. There are differences in the working of the gluster-swift (now obsolete) and swiftonfile projects. The older gluster-swift code and relevant documentation can be found in the icehouse branch of the swiftonfile repo.

          "},{"location":"Administrator-Guide/Object-Storage/#swiftonfile-vs-gluster-swift","title":"SwiftOnFile vs gluster-swift","text":"Gluster-Swift SwiftOnFile One GlusterFS volume maps to and stores only one Swift account. Mountpoint Hierarchy: container/object One GlusterFS volume or XFS partition can have multiple accounts. Mountpoint Hierarchy: acc/container/object Over-rides account server, container server and object server. We need to keep in sync with upstream Swift and often may need code changes or workarounds to support new Swift features Implements only object-server. Very less need to catch-up to Swift as new features at proxy,container and account level would very likely be compatible with SwiftOnFile as it's just a storage policy. Does not use DBs for accounts and container.A container listing involves a filesystem crawl.A HEAD on account/container gives inaccurate or stale results without FS crawl. Uses Swift's DBs to store account and container information. An account or container listing does not involve FS crawl. Accurate info on HEAD to account/container \u2013 ability to support account quotas. GET on a container and account lists actual files in filesystem. GET on a container and account only lists objects PUT over Swift. Files created over filesystem interface do not appear in container and object listings. Standalone deployment required and does not integrate with existing Swift cluster. Integrates with any existing Swift deployment as a Storage Policy."},{"location":"Administrator-Guide/Performance-Testing/","title":"Gluster performance testing","text":"

          Once you have created a Gluster volume, you need to verify that it has adequate performance for your application, and if it does not, you need a way to isolate the root cause of the problem.

          There are two kinds of workloads:

          • synthetic - run a test program such as ones below
          • application - run existing application
          "},{"location":"Administrator-Guide/Performance-Testing/#profiling-tools","title":"Profiling tools","text":"

          Ideally it's best to use the actual application that you want to run on Gluster, but applications often don't tell the sysadmin much about where the performance problems are, particularly latency (response-time) problems. So there are non-invasive profiling tools built into Gluster that can measure performance as seen by the application, without changing the application. Gluster profiling methods at present are based on the io-stats translator, and include:

          • client-side profiling - instrument a Gluster mountpoint or libgfapi process to sample profiling data. In this case, the io-stats translator is at the \"top\" of the translator stack, so the profile data truly represents what the application (or FUSE mountpoint) is asking Gluster to do. For example, a single application write is counted once as a WRITE FOP (file operation) call, and the latency for that WRITE FOP includes latency of the data replication done by the AFR translator lower in the stack.

          • server-side profiling - this is done using the \"gluster volume profile\" command (and \"gluster volume top\" can be used to identify particular hot files in use as well). Server-side profiling can measure the throughput of an entire Gluster volume over time, and can measure server-side latencies. However, it does not incorporate network or client-side latencies. It is also hard to infer application behavior because of client-side translators that alter the I/O workload (examples: erasure coding, cache tiering).

          In short, use client-side profiling for understanding \"why is my application unresponsive?\", and use server-side profiling for understanding how busy your Gluster volume is, what kind of workload is being applied to it (i.e. is it mostly-read? is it small-file?), and how well the I/O load is spread across the volume.

          "},{"location":"Administrator-Guide/Performance-Testing/#client-side-profiling","title":"client-side profiling","text":"

          To run client-side profiling,

          • gluster volume profile your-volume start
          • setfattr -n trusted.io-stats-dump -v io-stats-pre.txt /your/mountpoint

          This will generate the specified file (/var/run/gluster/io-stats-pre.txt) on the client. A script like gvp-client.sh can automate collection of this data.
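          A small sketch of a before/after collection, assuming a FUSE mount at /mnt/glusterfs (the dump file names are arbitrary; the dumps land under /var/run/gluster/ on the client):

              gluster volume profile your-volume start
              setfattr -n trusted.io-stats-dump -v io-stats-pre.txt /mnt/glusterfs
              # ... run the application workload you want to measure ...
              setfattr -n trusted.io-stats-dump -v io-stats-post.txt /mnt/glusterfs
              # Compare FOP counts and latencies between the two dumps
              diff /var/run/gluster/io-stats-pre.txt /var/run/gluster/io-stats-post.txt | less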

          TBS: what the different FOPs are and what they mean.

          "},{"location":"Administrator-Guide/Performance-Testing/#server-side-profiling","title":"server-side profiling","text":"

          To run it:

          • gluster volume profile your-volume start
          • repeat this command periodically: gluster volume profile your-volume info
          • gluster volume profile your-volume stop

          A script like gvp.sh can help you automate this procedure.

          Scripts to post-process this data are in development now, let us know what you need and what would be a useful format for presenting the data.

          "},{"location":"Administrator-Guide/Performance-Testing/#testing-tools","title":"Testing tools","text":"

          In this section, we suggest some basic workload tests that can be used to measure Gluster performance in an application-independent way for a wide variety of POSIX-like operating systems and runtime environments. We then provide some terminology and conceptual framework for interpreting these results.

          The tools that we suggest here are designed to run in a distributed filesystem. This is still a relatively rare attribute for filesystem benchmarks, even now! There is a much larger set of benchmarks available that can be run from a single system. While single-system results are important, they are far from a definitive measure of the performance capabilities of a distributed filesystem.

          • fio - for large file I/O tests.
          • smallfile - for pure-workload small-file tests
          • iozone - for pure-workload large-file tests
          • parallel-libgfapi - for pure-workload libgfapi tests

          The \"netmist\" mixed-workload generator of SPECsfs2014 may be suitable in some cases, but is not technically an open-source tool. This tool was written by Don Capps, who was an author of iozone.

          "},{"location":"Administrator-Guide/Performance-Testing/#fio","title":"fio","text":"

          fio is extremely powerful and, unlike iozone, is easily installed from traditional distros; it has increasingly powerful distributed test capabilities, described under its --client parameter upstream as of May 2015. To use this mode, start by launching an fio \"server\" instance on each workload generator host using:

              fio --server --daemonize=/var/run/fio-svr.pid\n

          And make sure your firewall allows port 8765 through for it. You can now run tests on sets of hosts using syntax like:

              fio --client=workload-generator.list --output-format=json my-workload.fiojob\n
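          For reference, a minimal my-workload.fiojob might look like the following (all values are illustrative; workload-generator.list is simply a text file with one workload-generator hostname per line):

              [global]
              directory=/mnt/glusterfs
              ioengine=libaio
              direct=1
              rw=randwrite
              bs=4k
              size=1g
              runtime=60
              time_based=1

              [randwrite-job]
              numjobs=4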

          You can also use it for distributed testing, however, by launching fio instances on separate hosts, taking care to start all fio instances as close to the same time as possible, limiting per-thread throughput, and specifying the run duration rather than the amount of data, so that all fio instances end at around the same time. You can then aggregate the fio results from different hosts to get a meaningful aggregate result.

          fio also has different I/O engines, in particular Huamin Chen authored the libgfapi engine for fio so that you can use fio to test Gluster performance without using FUSE.
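          A hedged sketch of a job file using that engine follows (testvol and server1 are placeholders; if your fio build was compiled without Gluster support, this engine will not be available):

              [global]
              ioengine=gfapi
              volume=testvol
              brick=server1
              rw=write
              bs=64k
              size=1g

              [gfapi-writer]
              numjobs=2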

          Limitations of fio in distributed mode:

          • stonewalling - fio calculates throughput based on when the last thread finishes a test run. In contrast, iozone calculates throughput by default based on when the FIRST thread finishes the workload. This can lead to (deceptively?) higher throughput results for iozone, since there are inevitably some \"straggler\" threads limping to the finish line later than others. It is possible in some cases to overcome this limitation by specifying a time limit for the test. This works well for random I/O tests, where typically you do not want to read/write the entire file/device anyway.
          • inaccuracy when response times > 1 sec - at least in some cases fio has reported excessively high IOPS when fio threads encounter response times much greater than 1 second, this can happen for distributed storage when there is unfairness in the implementation.
          • io engines are not integrated.
          "},{"location":"Administrator-Guide/Performance-Testing/#smallfile-distributed-io-benchmark","title":"smallfile Distributed I/O Benchmark","text":"

          Smallfile is a python-based small-file distributed POSIX workload generator which can be used to quickly measure performance for a variety of metadata-intensive workloads across an entire cluster. It has no dependencies on any specific filesystem or implementation AFAIK. It runs on Linux, Windows and should work on most Unixes too. It is intended to complement use of iozone benchmark for measuring performance of large-file workloads, and borrows certain concepts from iozone and Ric Wheeler's fs_mark. It was developed by Ben England starting in March 2009, and is now open-source (Apache License v2).

          Here is a typical simple sequence of tests where files laid down in an initial create test are then used in subsequent tests. There are many more smallfile operation types than these 5 (see doc), but these are the most commonly used ones.

              SMF=\"./smallfile_cli.py --top /mnt/glusterfs/smf --host-set h1,h2,h3,h4 --threads 8 --file-size 4 --files 10000 --response-times Y \"\n    $SMF --operation create\n    for s in $SERVERS ; do ssh $h 'echo 3 > /proc/sys/vm/drop_caches' ; done\n    $SMF --operation read\n    $SMF --operation append\n    $SMF --operation rename\n    $SMF --operation delete\n
          "},{"location":"Administrator-Guide/Performance-Testing/#iozone","title":"iozone","text":"

          This tool has limitations but does distributed testing well using -+m option (below).

          The \"-a\" option for automated testing of all use cases is discouraged, because:

          • this does not allow you to drop the read cache in server before a test.
          • most of the data points being measured will be irrelevant to the problem you are solving.

          Single-thread testing is an important use case, but to fully utilize the available hardware you typically need to do multi-thread and even multi-host testing.

          Consider using \"-c -e\" options to measure the time it takes for data to reach persistent storage. \"-C\" option lets you see how much each thread participated in the test. \"-+n\" allows you to save time by skipping re-read and re-write tests. \"-w\" option tells iozone not to delete any files that it accessed, so that subsequent tests can use them. Specify these options with each test:

          • -i -- test type, 0=write, 1=read, 2=random read/write
          • -r -- data transfer size -- allows you to simulate I/O size used by application
          • -s -- per-thread file size -- choose this to be large enough for the system to reach steady state (typically multiple GB needed)
          • -t -- number of threads -- how many subprocesses will be concurrently issuing I/O requests
          • -F -- list of files -- what files to write/read. If you do not specify then the filenames iozone.DUMMY.* will be used in the default directory.

          Example of an 8-thread sequential write test with 64-KB transfer size and file size of 1 GB to shared Gluster mountpoint directory /mnt/glusterfs , including time to fsync() and close() the files in the throughput calculation:

              iozone -w -c -e -i 0 -+n -C -r 64k -s 1g -t 8 -F /mnt/glusterfs/f{0,1,2,3,4,5,6,7,8}.ioz\n

          WARNING: random I/O testing in iozone is heavily restricted by the iozone constraint that it must randomly read then randomly write the entire file! This is not what we want - instead it should randomly read/write for some fraction of file size or time duration, allowing us to spread out more on the disk while not waiting too long for test to finish. This is why fio (below) is the preferred test tool for random I/O workloads.

          Distributed testing is a strength of the iozone utility, but this requires use of \"-+m\" option in place of \"-F\" option. The configuration file passed with \"-+m\" option contains a series of records that look like this:

              hostname   directory   iozone-pathname\n

          Where hostname is the host name or IP address of a test driver machine that iozone can use, directory is the pathname of a directory to use within that host, and iozone-pathname is the full pathname of the iozone executable to use on that host. Be sure that every target host can resolve the hostname of the host where the iozone command was run. All target hosts must permit password-less ssh access from the host running the command.

          For example: (Here, my-ip-address refers to the machine from where the iozone is being run)

              export RSH=ssh\n    iozone -+m ioz.cfg -+h my-ip-address -w -c -e -i 0 -+n -C -r 64k -s 1g -t 4\n

          And the file ioz.cfg contains these records (where /mnt/glusterfs is the Gluster mountpoint on each test machine and test-client-ip is the IP address of a client). Also note that each record in the file is a thread in iozone terminology. Since we defined the number of threads to be 4 in the above example, we have four records (threads) for a single client.

              test-client-ip  /mnt/glusterfs  /usr/local/bin/iozone\n    test-client-ip  /mnt/glusterfs  /usr/local/bin/iozone\n    test-client-ip  /mnt/glusterfs  /usr/local/bin/iozone\n    test-client-ip  /mnt/glusterfs  /usr/local/bin/iozone\n

          Restriction: Since iozone uses non-privileged ports, it may be necessary to temporarily shut down or alter iptables on some/all of the hosts. Secondary machines must support password-less ssh access from the primary machine.

          Note that the -+h option is undocumented but it tells the secondary host what IP address to use so that the secondary does not have to be able to resolve the hostname of the test driver. my-ip-address is the IP address that the secondary should connect to in order to report results back to the host. This need not be the same as the host's hostname.

          Typically you run the sequential write test first to lay down the file, drop cache on the servers (and clients if necessary), do the sequential read test, drop cache, do random I/O test if desired. Using above example:

              export RSH=ssh\n    IOZ=\"iozone -+m ioz.cfg -+h my-ip-address -w -C -c -e -r 64k -+n \"\n    hosts=\"`awk '{ print $1 }' ioz.cfg`\"\n    $IOZ -i 0 -s 1g -t 4\n    for n in $hosts $servers ; do \\\n       ssh $n 'sync; echo 1 > /proc/sys/vm/drop_caches' ; done\n    $IOZ -i 1 -s 1g -t 4\n    for n in $hosts $servers ; do \\\n       ssh $n 'sync; echo 1 > /proc/sys/vm/drop_caches' ; done\n    $IOZ -i 2 -s 1g -t 4\n

          If you use client with buffered I/O (the default), drop cache on the client machines first, then the server machines also as shown above.

          "},{"location":"Administrator-Guide/Performance-Testing/#parallel-libgfapi","title":"parallel-libgfapi","text":"

          This test exercises Gluster performance using the libgfapi API, bypassing FUSE - no mountpoints are used. Available here.

          To use it, you edit the script parameters in parallel_gfapi_test.sh script - all of them are above the comment \"NO EDITABLE PARAMETERS BELOW THIS LINE\". These include such things as the Gluster volume name, a host serving that volume, number of files, etc. You then make sure that the gfapi_perf_test executable is distributed to the client machines at the specified directory, and then run the script. The script starts all libgfapi workload generator processes in parallel in such a way that they all start the test at the same time. It waits until they all complete, and then it collects and aggregates the results for you.

          Note that libgfapi processes consume one socket per brick, so in Gluster volumes with high brick counts, there can be constraints on the number of libgfapi processes that can run concurrently. Specifically, each host can only support up to about 30000 concurrent TCP ports. You may need to adjust \"ulimit -n\" parameter (see /etc/security/limits.conf \"nofile\" parameter for persistent tuning).

          "},{"location":"Administrator-Guide/Performance-Testing/#object-store-tools","title":"Object Store tools","text":"

          COSBench was developed by Intel employees and is very useful for both Swift and S3 workload generation.

          ssbench is part of the OpenStack Swift toolset and is a command-line tool with a workload definition file format.

          "},{"location":"Administrator-Guide/Performance-Testing/#workload","title":"Workload","text":"

          An application can be as simple as writing some files, or it can be as complex as running a cloud on top of Gluster. But all applications have performance requirements, whether the users are aware of them or not, and if these requirements aren't met, the system as a whole is not functional from the user's perspective. The activities that the application spends most of its time doing with Gluster are called the \"workload\" below. For the Gluster filesystem, the \"workload\" consists of the filesystem requests being delivered to Gluster by the application. There are two ways to look at workload:

          • top-down - what is the application trying to get the filesystem to do?
          • bottom-up - what requests is the application actually generating to the filesystem?
          "},{"location":"Administrator-Guide/Performance-Testing/#data-vs-metadata","title":"data vs metadata","text":"

          In this page we frequently refer to \"large-file\" or \"small-file\" workloads. But what do we mean by the terms \"large-file\" or \"small-file\"? \"large-file\" is a deliberately vague but descriptive term that refers to workloads where most of the application time is spent reading/writing the file. This is in contrast to a \"small-file\" workload, where most of the application's time is spent opening/closing the file or accessing metadata about the file. Metadata means \"data about data\", so it is information that describes the state of the file, rather than the contents of the file. For example, a filename is a type of metadata, as are directories and extended attributes.

          "},{"location":"Administrator-Guide/Performance-Testing/#top-down-workload-analysis","title":"Top-down workload analysis","text":"

          Often this is what users will be able to help you with -- for example, a workload might consist of ingesting a billion .mp3 files. Typical questions that need to be answered (approximately) are:

          • what is file size distribution? Averages are often not enough - file size distributions can be bi-modal (i.e. consist mostly of the very large and very small file sizes). TBS: provide pointers to scripts that can collect this.
          • what fraction of file accesses are reads vs writes?
          • how cache-friendly is the workload? Do the same files get read repeatedly by different Gluster clients, or by different processes/threads on these clients?
          • for large-file workloads, what fraction of accesses are sequential/random? Sequential file access means that the application thread reads/writes the file from start to finish in byte offset order, and random file access is the exact opposite -- the thread may read/write from any offset at any time. Virtual machine disk images are typically accessed randomly, since the VM's filesystem is embedded in a Gluster file.

          Why do these questions matter? For example, if you have a large-file sequential read workload, network configuration plus Gluster and Linux readahead is important. If you have a small-file workload, storage configuration is important, and so on. You will not know what tuning is appropriate for Gluster unless you have a basic understanding of the workload.

          "},{"location":"Administrator-Guide/Performance-Testing/#bottom-up-analysis","title":"Bottom-up analysis","text":"

          Even a complex application may have a very simple workload from the point of view of the filesystem servicing its requests. If you don't know what your application spends its time doing, you can start by running the \"gluster volume profile\" and \"gluster volume top\" commands. These extremely useful tools will help you understand both the workload and the bottlenecks which are limiting performance of that workload.

          TBS: links to documentation for these tools and scripts that reduce the data to usable form.

          "},{"location":"Administrator-Guide/Performance-Testing/#configuration","title":"Configuration","text":"

          There are a few basic hardware and configuration dimensions to a Gluster server, listed here in order of importance:

          • network - possibly the most important hardware component of a Gluster site
          • access protocol - what kind of client is used to get to the files/objects?
          • storage - this is absolutely critical to get right up front
          • cpu - on client, look for hot threads (see below)
          • memory - can impact performance of read-intensive, cacheable workloads
          "},{"location":"Administrator-Guide/Performance-Testing/#network-testing","title":"network testing","text":"

          Network configuration has a huge impact on performance of distributed storage, but is often not given the attention it deserves during the planning and installation phases of the cluster lifecycle. Fortunately, network configuration can be enhanced significantly, often without additional hardware.

          To measure network performance, consider use of a netperf-based script.

          The purpose of such a tool is to characterize the capacity of your entire network infrastructure to support the desired level of traffic induced by distributed storage, using multiple network connections in parallel. A parallel, multi-stream netperf run is probably the most realistic network workload for distributed storage.

          The two most common hardware problems impacting distributed storage are, not surprisingly, disk drive failures and network failures. Some of these failures do not cause hard errors but instead cause performance degradation. For example, with a bonded network interface containing two physical network interfaces, if one of the physical interfaces fails (either a port on the NIC/switch, or a cable), then the bonded interface will stay up but with less performance (how much less depends on the bonding mode). Another error would be the failure of a 10-GbE Ethernet interface to autonegotiate speed to 10-Gbps -- sometimes network interfaces auto-negotiate to 1-Gbps instead. If the TCP connection is experiencing a high rate of packet loss or is not tuned correctly, it may not reach the full network speed supported by the hardware.

          So why run parallel netperf sessions instead of just one? There are a variety of network performance problems relating to network topology (the way in which hosts are interconnected), particularly network switch and router topology, that only manifest when several pairs of hosts are attempting to transmit traffic across the same shared resource, which could be a trunk connecting top-of-rack switches or a blade-based switch with insufficient bandwidth to switch backplane, for example. Individual netperf/iperf sessions will not find these problems, but this script will.

          This test can be used to simulate flow of data through a distributed filesystem, for example. If you want to simulate 4 Gluster clients, call them c1 through c4, writing large files to a set of 2 servers, call them s1 and s2, you can specify these (sender, receiver) pairs:

              (c1, s1), (c2, s2), (c3, s1), (c4, s2)\n

          If on the other hand you want to simulate reads, you can use these (sender, receiver) pairs:

              (s1, c1), (s2, c2), (s1, c3), (s2, c4)\n

          To simulate a mixed read-write workload, use both sets of pairs:

              (c1, s1), (c2, s2), (c3, s1), (c4, s2), (s1, c1), (s2, c2), (s1, c3), (s2, c4)\n

          More complicated flows can model the behavior of non-native protocols, where a cluster node acts as a proxy server: it is a server (for the non-native protocol) and a client (for the native protocol). Such protocols often induce full-duplex traffic, which can stress the network differently than unidirectional in/out traffic. For example, try adding this set of flows to the preceding flows:

              (s1, s2), (s2, s3), (s3, s4), (s4, s1)\n

          The comments at the top of the script describe the input syntax, but here are some suggestions on how to best utilize it. You typically run this script from a head node or test driver that has password-less ssh access to the set of machines being tested. The hosts running the test do not need ssh access to each other -- they only have to allow password-less ssh access from the head node. The script does not rely on root privileges, so you can run it from a non-root account. Just create a public key on the head node in the right account (usually in \\$HOME/.ssh/id_rsa.pub ) and then append this public key to \\$HOME/.ssh/authorized_keys on each host participating in the test.

          We input senders and receivers using separate text files, 1 host per line. For pair (sender[j], receiver[j]), you get sender[j] from line j in the sender file, and receiver[j] from line j in the receiver file. You have to use the IP address/name that corresponds to the interface you want to test, and you have to be able to ssh to each host from the head node using this interface.
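          For the 4-client write example above, the two input files might look like this (hostnames are illustrative; line j of the sender file pairs with line j of the receiver file):

              $ cat senders.txt
              c1
              c2
              c3
              c4
              $ cat receivers.txt
              s1
              s2
              s1
              s2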

          "},{"location":"Administrator-Guide/Performance-Testing/#results","title":"Results","text":"

          There are 4 basic forms of performance results, not in order of importance:

          • throughput -- how much work is done in a unit of time? Best metrics typically are workload-dependent:
          • for large-file random: IOPS
          • for large-file sequential: MB/s
          • for small-file: files/sec
          • response time -- IMPORTANT: how long does it take for a filesystem request to complete?
          • utilization -- how busy is the hardware while the workload is running?
          • scalability -- can we linearly scale throughput without sacrificing response time as we add servers to a Gluster volume?

          Typically throughput results get the most attention, but in a distributed-storage environment, the hardest goal to achieve may well be CONSISTENTLY LOW RESPONSE TIME, not throughput.

          While there are non-interactive workloads where response time does not matter as much, you should pay attention to response time in any situation where a user has to directly interact with the filesystem. Tuning the filesystem to achieve the absolute highest throughput can result in a filesystem that is unusable because of high response time. Unless you are in a benchmarking situation, you want to achieve a balance of good throughput and response time. Typically an interactive user wants to see a response time under 5 seconds always, with most response times much lower than this. To keep response times under control (including system management!), you do not want any hardware component to run at maximum utilization, typically 60-80% utilization is a good peak utilization target. On the other hand, to avoid wasting hardware, you want all of the hardware to be utilized to some extent.

          "},{"location":"Administrator-Guide/Performance-Tuning/","title":"Performance tuning","text":""},{"location":"Administrator-Guide/Performance-Tuning/#enable-metadata-cache","title":"Enable Metadata cache","text":"

          Metadata caching improves performance for almost all workloads, except for use cases where most of the workload involves accessing a file simultaneously from multiple clients.

          1. Execute the following command to enable metadata caching and cache invalidation:

            gluster volume set <volname> group metadata-cache

            This group command enables caching of stat and xattr information of a file or directory. The caching is refreshed every 10 min, and cache-invalidation is enabled to ensure cache consistency.

          2. To increase the number of files that can be cached, execute the following command:

            gluster volume set <volname> network.inode-lru-limit <n>

            By default, n is set to 50000. It can be increased if the number of active files in the volume is very high. Increasing this number increases the memory footprint of the brick processes.

          3. Execute the following command to enable samba specific metadata caching:

            gluster volume set <volname> cache-samba-metadata on

          4. By default, some xattrs are cached by Gluster, such as capability xattrs, ima xattrs, ACLs, etc. If there are any other xattrs that are used by the application using the Gluster storage, execute the following command to add these xattrs to the metadata cache list: gluster volume set <volname> xattr-cache-list \"comma separated xattr list\". For example: gluster volume set <volname> xattr-cache-list \"user.org.netatalk.*,user.swift.metadata\"

          "},{"location":"Administrator-Guide/Performance-Tuning/#directory-operations","title":"Directory operations","text":"

          Along with enabling the metadata caching, the following options can be set to increase performance of directory operations:

          "},{"location":"Administrator-Guide/Performance-Tuning/#directory-listing-performance","title":"Directory listing Performance:","text":"
          • Enable parallel-readdir

          gluster volume set <VOLNAME> performance.readdir-ahead on
          gluster volume set <VOLNAME> performance.parallel-readdir on

          "},{"location":"Administrator-Guide/Performance-Tuning/#filedirectory-create-performance","title":"File/Directory Create Performance","text":"
          • Enable nl-cache

          gluster volume set <volname> group nl-cache
          gluster volume set <volname> nl-cache-positive-entry on

          The above commands also enable cache invalidation and increase the timeout to 10 minutes.

          "},{"location":"Administrator-Guide/Performance-Tuning/#small-file-read-operations","title":"Small file Read operations","text":"

          For use cases with dominant small file reads, enable the following options

          gluster volume set <volname> performance.cache-invalidation on\ngluster volume set <volname> features.cache-invalidation on\ngluster volume set <volname> performance.qr-cache-timeout 600 # 10 min recommended setting\ngluster volume set <volname> cache-invalidation-timeout 600 # 10 min recommended setting\n

          These commands enable caching of the content of small files in the client cache. Enabling cache invalidation ensures cache consistency.

          The total cache size can be set using

          gluster volume set <volname> cache-size <size>\n

          By default, the files with size <=64KB are cached. To change this value:

          gluster volume set <volname> performance.cache-max-file-size <size>\n

          Note that the size arguments use SI unit suffixes, e.g. 64KB or 2MB.
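          For example, with illustrative values (size the cache to the memory you can spare on the clients):

              gluster volume set <volname> performance.cache-size 256MB
              gluster volume set <volname> performance.cache-max-file-size 2MB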

          "},{"location":"Administrator-Guide/Puppet/","title":"Puppet-Gluster","text":""},{"location":"Administrator-Guide/Puppet/#a-glusterfs-puppet-module-by-james","title":"A GlusterFS Puppet module by James","text":""},{"location":"Administrator-Guide/Puppet/#available-from","title":"Available from:","text":""},{"location":"Administrator-Guide/Puppet/#httpsgithubcompurpleideapuppet-gluster","title":"https://github.com/purpleidea/puppet-gluster/","text":""},{"location":"Administrator-Guide/Puppet/#table-of-contents","title":"Table of Contents","text":"
          1. Overview
          2. Module description - What the module does
          3. Setup - Getting started with Puppet-Gluster
            • What can Puppet-Gluster manage?
            • Simple setup
            • Elastic setup
            • Advanced setup
          4. Usage/FAQ - Notes on management and frequently asked questions
          5. Reference - Class and type reference
            • gluster::simple
            • gluster::elastic
            • gluster::server
            • gluster::host
            • gluster::brick
            • gluster::volume
            • gluster::volume::property
          6. Examples - Example configurations
          7. Limitations - Puppet versions, OS compatibility, etc...
          8. Development - Background on module development
          9. Author - Author and contact information
          "},{"location":"Administrator-Guide/Puppet/#overview","title":"Overview","text":"

          The Puppet-Gluster module installs, configures, and manages a GlusterFS cluster.

          "},{"location":"Administrator-Guide/Puppet/#module-description","title":"Module Description","text":"

          This Puppet-Gluster module handles installation, configuration, and management of GlusterFS across all of the hosts in the cluster.

          "},{"location":"Administrator-Guide/Puppet/#setup","title":"Setup","text":""},{"location":"Administrator-Guide/Puppet/#what-can-puppet-gluster-manage","title":"What can Puppet-Gluster manage?","text":"

          Puppet-Gluster is designed to be able to manage as much or as little of your GlusterFS cluster as you wish. All features are optional. If there is a feature that doesn't appear to be optional, and you believe it should be, please let me know. Having said that, it makes good sense to me to have Puppet-Gluster manage as much of your GlusterFS infrastructure as it can. At the moment, it cannot rack new servers, but I am accepting funding to explore this feature ;) At the moment it can manage:

          • GlusterFS packages (rpm)
          • GlusterFS configuration files (/var/lib/glusterd/)
          • GlusterFS host peering (gluster peer probe)
          • GlusterFS storage partitioning (fdisk)
          • GlusterFS storage formatting (mkfs)
          • GlusterFS brick creation (mkdir)
          • GlusterFS services (glusterd)
          • GlusterFS firewalling (whitelisting)
          • GlusterFS volume creation (gluster volume create)
          • GlusterFS volume state (started/stopped)
          • GlusterFS volume properties (gluster volume set)
          • And much more...
          "},{"location":"Administrator-Guide/Puppet/#simple-setup","title":"Simple setup","text":"

          include '::gluster::simple' is enough to get you up and running. When using the gluster::simple class, or with any other Puppet-Gluster configuration, identical definitions must be used on all hosts in the cluster. The simplest way to accomplish this is with a single shared puppet host definition like:

          node /^annex\\d+$/ {        # annex{1,2,..N}\n        class { '::gluster::simple':\n        }\n}\n

          If you wish to pass in different parameters, you can specify them in the class before you provision your hosts:

          class { '::gluster::simple':\n    replica => 2,\n    volume => ['volume1', 'volume2', 'volumeN'],\n}\n
          "},{"location":"Administrator-Guide/Puppet/#elastic-setup","title":"Elastic setup","text":"

          The gluster::elastic class is not yet available. Stay tuned!

          "},{"location":"Administrator-Guide/Puppet/#advanced-setup","title":"Advanced setup","text":"

          Some system administrators may wish to manually itemize each of the required components for the Puppet-Gluster deployment. This happens automatically with the higher level modules, but may still be a desirable feature, particularly for non-elastic storage pools where the configuration isn't expected to change very often (if ever).

          To put together your cluster piece by piece, you must manually include and define each class and type that you wish to use. If there are certain aspects that you wish to manage yourself, you can omit them from your configuration. See the reference section below for the specifics. Here is one possible example:

          class { '::gluster::server':\n    shorewall => true,\n}\n\ngluster::host { 'annex1.example.com':\n    # use uuidgen to make these\n    uuid => '1f660ca2-2c78-4aa0-8f4d-21608218c69c',\n}\n\n# note that this is using a folder on your existing file system...\n# this can be useful for prototyping gluster using virtual machines\n# if this isn't a separate partition, remember that your root fs will\n# run out of space when your gluster volume does!\ngluster::brick { 'annex1.example.com:/data/gluster-storage1':\n    areyousure => true,\n}\n\ngluster::host { 'annex2.example.com':\n    # NOTE: specifying a host uuid is now optional!\n    # if you don't choose one, one will be assigned\n    #uuid => '2fbe6e2f-f6bc-4c2d-a301-62fa90c459f8',\n}\n\ngluster::brick { 'annex2.example.com:/data/gluster-storage2':\n    areyousure => true,\n}\n\n$brick_list = [\n    'annex1.example.com:/data/gluster-storage1',\n    'annex2.example.com:/data/gluster-storage2',\n]\n\ngluster::volume { 'examplevol':\n    replica => 2,\n    bricks => $brick_list,\n    start => undef, # i'll start this myself\n}\n\n# namevar must be: <VOLNAME>#<KEY>\ngluster::volume::property { 'examplevol#auth.reject':\n    value => ['192.0.2.13', '198.51.100.42', '203.0.113.69'],\n}\n
          "},{"location":"Administrator-Guide/Puppet/#usage-and-frequently-asked-questions","title":"Usage and frequently asked questions","text":"

          All management should be done by manipulating the arguments of the appropriate Puppet-Gluster classes and types. Since certain manipulations are either not yet possible with Puppet-Gluster, or are not supported by GlusterFS, attempting to manipulate the Puppet configuration in an unsupported way will result in undefined behaviour, and possibly even data loss; however, this is unlikely.

          "},{"location":"Administrator-Guide/Puppet/#how-do-i-change-the-replica-count","title":"How do I change the replica count?","text":"

          You must set this before volume creation. This is a limitation of GlusterFS. There are certain situations where you can change the replica count by adding a multiple of the existing brick count to get this desired effect. These cases are not yet supported by Puppet-Gluster. If you want to use Puppet-Gluster before and / or after this transition, you can do so, but you'll have to do the changes manually.

          "},{"location":"Administrator-Guide/Puppet/#do-i-need-to-use-a-virtual-ip","title":"Do I need to use a virtual IP?","text":"

          Using a virtual IP (VIP) is strongly recommended as a distributed lock manager (DLM) and also to provide a highly-available (HA) IP address for your clients to connect to. For a more detailed explanation of the reasoning please see:

          https://ttboj.wordpress.com/2012/08/23/how-to-avoid-cluster-race-conditions-or-how-to-implement-a-distributed-lock-manager-in-puppet/

          Remember that even if you're using a hosted solution (such as AWS) that doesn't provide an additional IP address, or you want to avoid using an additional IP, and you're okay not having full HA client mounting, you can use an unused private RFC1918 IP address as the DLM VIP. Remember that a layer 3 IP can co-exist on the same layer 2 network with the layer 3 network that is used by your cluster.

          "},{"location":"Administrator-Guide/Puppet/#is-it-possible-to-have-puppet-gluster-complete-in-a-single-run","title":"Is it possible to have Puppet-Gluster complete in a single run?","text":"

          No. This is a limitation of Puppet, and is related to how GlusterFS operates. For example, it is not reliably possible to predict which ports a particular GlusterFS volume will run on until after the volume is started. As a result, this module will initially whitelist connections from GlusterFS host IP addresses, and then further restrict this to only allow individual ports once this information is known. This is possible in conjunction with the puppet-shorewall module. You should notice that each run should complete without error. If you do see an error, it means that either something is wrong with your system and / or configuration, or because there is a bug in Puppet-Gluster.

          "},{"location":"Administrator-Guide/Puppet/#can-you-integrate-this-with-vagrant","title":"Can you integrate this with vagrant?","text":"

          Not until vagrant properly supports libvirt/KVM. I have no desire to use VirtualBox for fun.

          "},{"location":"Administrator-Guide/Puppet/#awesome-work-but-its-missing-support-for-a-feature-andor-platform","title":"Awesome work, but it's missing support for a feature and/or platform!","text":"

          Since this is an Open Source / Free Software project that I also give away for free (as in beer, free as in gratis, free as in libre), I'm unable to provide unlimited support. Please consider donating funds, hardware, virtual machines, and other resources. For specific needs, you could perhaps sponsor a feature!

          "},{"location":"Administrator-Guide/Puppet/#you-didnt-answer-my-question-or-i-have-a-question","title":"You didn't answer my question, or I have a question!","text":"

          Contact me through my technical blog and I'll do my best to help. If you have a good question, please remind me to add my answer to this documentation!

          "},{"location":"Administrator-Guide/Puppet/#reference","title":"Reference","text":"

          Please note that there are a number of undocumented options. For more information on these options, please view the source at: https://github.com/purpleidea/puppet-gluster/. If you feel that a well used option needs documenting here, please contact me.

          "},{"location":"Administrator-Guide/Puppet/#overview-of-classes-and-types","title":"Overview of classes and types","text":"
          • gluster::simple: Simple Puppet-Gluster deployment.
          • gluster::elastic: Under construction.
          • gluster::server: Base class for server hosts.
          • gluster::host: Host type for each participating host.
          • gluster::brick: Brick type for each defined brick, per host.
          • gluster::volume: Volume type for each defined volume.
          • gluster::volume::property: Manages properties for each volume.
          "},{"location":"Administrator-Guide/Puppet/#glustersimple","title":"gluster::simple","text":"

          This is gluster::simple. It should probably take care of 80% of all use cases. It is particularly useful for deploying quick test clusters. It uses a finite-state machine (FSM) to decide when the cluster has settled and volume creation can begin. For more information on the FSM in Puppet-Gluster see: https://ttboj.wordpress.com/2013/09/28/finite-state-machines-in-puppet/

          "},{"location":"Administrator-Guide/Puppet/#replica","title":"replica","text":"

          The replica count. Can't be changed automatically after initial deployment.

          "},{"location":"Administrator-Guide/Puppet/#volume","title":"volume","text":"

          The volume name or list of volume names to create.

          "},{"location":"Administrator-Guide/Puppet/#path","title":"path","text":"

          The valid brick path for each host. Defaults to local file system. If you need a different path per host, then Gluster::Simple will not meet your needs.

          "},{"location":"Administrator-Guide/Puppet/#vip","title":"vip","text":"

          The virtual IP address to be used for the cluster distributed lock manager.

          "},{"location":"Administrator-Guide/Puppet/#shorewall","title":"shorewall","text":"

          Boolean to specify whether puppet-shorewall integration should be used or not.

          "},{"location":"Administrator-Guide/Puppet/#glusterelastic","title":"gluster::elastic","text":"

          Under construction.

          "},{"location":"Administrator-Guide/Puppet/#glusterserver","title":"gluster::server","text":"

          Main server class for the cluster. Must be included when building the GlusterFS cluster manually. Wrapper classes such as gluster::simple include this automatically.

          "},{"location":"Administrator-Guide/Puppet/#vip_1","title":"vip","text":"

          The virtual IP address to be used for the cluster distributed lock manager.

          "},{"location":"Administrator-Guide/Puppet/#shorewall_1","title":"shorewall","text":"

          Boolean to specify whether puppet-shorewall integration should be used or not.

          "},{"location":"Administrator-Guide/Puppet/#glusterhost","title":"gluster::host","text":"

          Main host type for the cluster. Each host participating in the GlusterFS cluster must define this type on itself, and on every other host. As a result, this is not a singleton like the gluster::server class.

          "},{"location":"Administrator-Guide/Puppet/#ip","title":"ip","text":"

          Specify which IP address this host is using. This defaults to the $::ipaddress variable. Be sure to set this manually if you're declaring this yourself on each host without using exported resources. If each host thinks the other hosts should have the same IP address as itself, then Puppet-Gluster and GlusterFS won't work correctly.

          "},{"location":"Administrator-Guide/Puppet/#uuid","title":"uuid","text":"

          Universally unique identifier (UUID) for the host. If empty, Puppet-Gluster will generate this automatically for the host. You can generate your own manually with uuidgen, and set them yourself. I found this particularly useful for testing, because I would pick easy-to-recognize UUIDs like: aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa, bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb, and so on. If you set a UUID manually, and Puppet-Gluster has a chance to run, then it will remember your choice, and store it locally to be used again if you no longer specify the UUID. This is particularly useful for upgrading an existing un-managed GlusterFS installation to a Puppet-Gluster managed one, without changing any UUIDs.
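
          For example, generating a UUID on the command line is as simple as:

          uuidgen\n# prints something like 3f2504e0-4f89-11d3-9a0c-0305e82c3301\n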

          "},{"location":"Administrator-Guide/Puppet/#glusterbrick","title":"gluster::brick","text":"

          Main brick type for the cluster. Each brick is an individual storage segment to be used on a host. Each host must have at least one brick to participate in the cluster, but usually a host will have multiple bricks. A brick can be as simple as a file system folder, or it can be a separate file system. Please read the official GlusterFS documentation, if you aren't entirely comfortable with the concept of a brick.

          For most test clusters, and for experimentation, it is easiest to use a directory on the root file system. You can even use a /tmp sub folder if you don't care about the persistence of your data. For more serious clusters, you might want to create separate file systems for your data. On self-hosted iron, it is not uncommon to create multiple RAID-6 drive pools, and to then create a separate file system per virtual drive. Each file system can then be used as a single brick.

          So that each volume in GlusterFS has the maximum ability to grow, without having to partition storage separately, the bricks in Puppet-Gluster are actually folders (on whatever backing store you wish) which then contain sub folders, one for each volume. As a result, all the volumes on a given GlusterFS cluster can share the total available storage space. If you wish to limit the storage used by each volume, you can set up quotas. Alternatively, you can buy more hardware, and elastically grow your GlusterFS volumes, since the price per GB will be significantly less than any proprietary storage system. The one downside to this brick sharing is that if you have chosen the brick per host count specifically to match your performance requirements, and each GlusterFS volume on the same cluster has drastically different brick per host performance requirements, then this won't suit your needs. I doubt that anyone actually has such requirements, but if you do insist on needing this compartmentalization, then you can probably use the Puppet-Gluster grouping feature to accomplish this goal. Please let me know about your use-case, and be warned that the grouping feature hasn't been extensively tested.

          To prove to you that I care about automation, this type offers the ability to automatically partition and format your file systems. This means you can plug in new iron, boot, provision and configure the entire system automatically. Regrettably, I don't have a lot of test hardware to routinely use this feature. If you'd like to donate some, I'd be happy to test this thoroughly. Having said that, I have used this feature, I consider it to be extremely safe, and it has never caused me to lose data. If you're uncertain, feel free to look at the code, or avoid using this feature entirely. If you think there's a way to make it even safer, then feel free to let me know.

          "},{"location":"Administrator-Guide/Puppet/#dev","title":"dev","text":"

          Block device, such as /dev/sdc or /dev/disk/by-id/scsi-0123456789abcdef. By default, Puppet-Gluster will assume you're using a folder to store the brick data, if you don't specify this parameter.

          "},{"location":"Administrator-Guide/Puppet/#fsuuid","title":"fsuuid","text":"

          File system UUID. This ensures we can distinctly identify a file system. You can set this to be used with automatic file system creation, or you can specify the file system UUID that you'd like to use.

          "},{"location":"Administrator-Guide/Puppet/#labeltype","title":"labeltype","text":"

          Only gpt is supported. Other options include msdos, but this has never been used because of its size limitations.

          "},{"location":"Administrator-Guide/Puppet/#fstype","title":"fstype","text":"

          This should be xfs or ext4. Using xfs is recommended, but ext4 is also quite common. This only affects a file system that is getting created by this module. If you provision a new machine, with a root file system of ext4, and the brick you create is a root file system path, then this option does nothing.

          "},{"location":"Administrator-Guide/Puppet/#xfs_inode64","title":"xfs_inode64","text":"

          Set inode64 mount option when using the xfs fstype. Choose true to set.

          "},{"location":"Administrator-Guide/Puppet/#xfs_nobarrier","title":"xfs_nobarrier","text":"

          Set nobarrier mount option when using the xfs fstype. Choose true to set.

          "},{"location":"Administrator-Guide/Puppet/#ro","title":"ro","text":"

          Whether the file system should be mounted read only. For emergencies only.

          "},{"location":"Administrator-Guide/Puppet/#force","title":"force","text":"

          If true, this will overwrite any xfs file system it sees. This is useful for rebuilding GlusterFS repeatedly and wiping data. There are other safeties in place to stop this. In general, you probably don't ever want to touch this.

          "},{"location":"Administrator-Guide/Puppet/#areyousure","title":"areyousure","text":"

          Do you want to allow Puppet-Gluster to do dangerous things? You have to set this to true to allow Puppet-Gluster to fdisk and mkfs your file system.

          "},{"location":"Administrator-Guide/Puppet/#glustervolume","title":"gluster::volume","text":"

          Main volume type for the cluster. This is where a lot of the magic happens. Remember that changing some of these parameters after the volume has been created won't work, and you'll experience undefined behaviour. There could be FSM based error checking to verify that no changes occur, but it has been left out so that this code base can eventually support such changes, and so that the user can manually change a parameter if they know that it is safe to do so.

          "},{"location":"Administrator-Guide/Puppet/#bricks","title":"bricks","text":"

          List of bricks to use for this volume. If this is left at the default value of true, then this list is built automatically. The algorithm that determines this order does not support all possible situations, and most likely can't handle certain corner cases. It is possible to examine the FSM to view the selected brick order before it has a chance to create the volume. The volume creation script won't run until there is a stable brick list as seen by the FSM running on the host that has the DLM. If you specify this list of bricks manually, you must choose the order to match your desired volume layout. If you aren't sure about how to order the bricks, you should review the GlusterFS documentation first.

          "},{"location":"Administrator-Guide/Puppet/#transport","title":"transport","text":"

          Only tcp is supported. Possible values can include rdma, but this won't get any testing if I don't have access to infiniband hardware. Donations welcome.

          "},{"location":"Administrator-Guide/Puppet/#replica_1","title":"replica","text":"

          Replica count. Usually you'll want to set this to 2. Some users choose 3. Other values are seldom seen. A value of 1 can be used for simply testing a distributed setup, when you don't care about your data or high availability. A value greater than 4 is probably wasteful and unnecessary. It might even cause performance issues if a synchronous write is waiting on a slow fourth server.

          "},{"location":"Administrator-Guide/Puppet/#stripe","title":"stripe","text":"

          Stripe count. Thoroughly unsupported and untested option. Not recommended for use by GlusterFS.

          "},{"location":"Administrator-Guide/Puppet/#ping","title":"ping","text":"

          Do we want to include ping checks with fping?

          "},{"location":"Administrator-Guide/Puppet/#settle","title":"settle","text":"

          Do we want to run settle checks?

          "},{"location":"Administrator-Guide/Puppet/#start","title":"start","text":"

          Requested state for the volume. Valid values include: true (start), false (stop), or undef (un-managed start/stop state).

          "},{"location":"Administrator-Guide/Puppet/#glustervolumeproperty","title":"gluster::volume::property","text":"

          Main volume property type for the cluster. This allows you to manage GlusterFS volume-specific properties. There are a wide range of properties that volumes support. For the full list of properties, you should consult the GlusterFS documentation, or run the gluster volume set help command. To set a property you must use the special name pattern of: volume#key. The value argument is used to set the associated value. It is smart enough to accept values in the most logical format for that specific property. Some properties aren't yet supported, so please report any problems you have with this functionality. Because this feature is an awesome way to document as code the volume-specific optimizations that you've made, make sure you use this feature even if you don't use all the others.

          "},{"location":"Administrator-Guide/Puppet/#value","title":"value","text":"

          The value to be used for this volume property.

          "},{"location":"Administrator-Guide/Puppet/#examples","title":"Examples","text":"

          For example configurations, please consult the examples/ directory in the git source repository. It is available from:

          https://github.com/purpleidea/puppet-gluster/tree/master/examples
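
          For instance, to browse the examples locally:

          git clone https://github.com/purpleidea/puppet-gluster.git\nls puppet-gluster/examples/\n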

          "},{"location":"Administrator-Guide/Puppet/#limitations","title":"Limitations","text":"

          This module has been tested against open source Puppet 3.2.4 and higher.

          The module has been tested on:

          • CentOS 6.4

          It will probably work without incident or without major modification on:

          • CentOS 5.x/6.x
          • RHEL 5.x/6.x

          It will most likely work with other Puppet versions and on other platforms, but testing under other conditions has been light due to lack of resources. It will most likely not work on Debian/Ubuntu systems without modification. I would really love to add support for these operating systems, but I do not have any test resources to do so. Please sponsor this if you'd like to see it happen.

          "},{"location":"Administrator-Guide/Puppet/#development","title":"Development","text":"

          This is my personal project that I work on in my free time. Donations of funding, hardware, virtual machines, and other resources are appreciated. Please contact me if you'd like to sponsor a feature, invite me to talk/teach or for consulting.

          You can follow along on my technical blog.

          "},{"location":"Administrator-Guide/Puppet/#author","title":"Author","text":"

          Copyright (C) 2010-2013+ James Shubin

          • github
          • @purpleidea
          • https://ttboj.wordpress.com/
          "},{"location":"Administrator-Guide/RDMA-Transport/","title":"NOTE: FEATURE DEPRECATED","text":"

          RDMA is no longer supported in Gluster builds. It has been removed from release 8 onwards.

          Currently we do not have:

          1. The expertise to support RDMA
          2. Infrastructure to test/verify the performance for each release

          The options are being discussed here: https://github.com/gluster/glusterfs/issues/2000

          RDMA is ready to be enabled as a compile-time option if proper support and testing infrastructure become available.

          "},{"location":"Administrator-Guide/RDMA-Transport/#introduction","title":"Introduction","text":"

          GlusterFS supports using the RDMA protocol for communication between glusterfs clients and glusterfs bricks. GlusterFS clients include the FUSE client, libgfapi clients (Samba and NFS-Ganesha included), the gNFS server and other glusterfs processes that communicate with bricks, like the self-heal daemon, quotad, rebalance process etc.

          NOTE: As of now only FUSE client and gNFS server would support RDMA transport.

          NOTE: NFS client to gNFS Server/NFS Ganesha Server communication would still happen over tcp. CIFS Clients/Windows Clients to Samba Server communication would still happen over tcp.

          "},{"location":"Administrator-Guide/RDMA-Transport/#setup","title":"Setup","text":"

          Please refer to this external documentation to set up RDMA on your machines: http://people.redhat.com/dledford/infiniband_get_started.html
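
          Once the RDMA stack is configured, a quick sanity check that the devices are visible could look like this (assuming the usual libibverbs-utils and infiniband-diags tools are installed):

          ibv_devices\nibstat\n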

          "},{"location":"Administrator-Guide/RDMA-Transport/#creating-trusted-storage-pool","title":"Creating Trusted Storage Pool","text":"

          All the servers in the Trusted Storage Pool must have RDMA devices if either RDMA or TCP,RDMA volumes are created in the storage pool. The peer probe must be performed using the IP/hostname assigned to the RDMA device.
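
          For example, if server1-ib is a hostname that resolves to server1's RDMA interface (the name here is just a placeholder), the probe could look like:

          gluster peer probe server1-ib\n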

          "},{"location":"Administrator-Guide/RDMA-Transport/#ports-and-firewall","title":"Ports and Firewall","text":"

          The glusterd process will listen on both tcp and rdma if an RDMA device is found. The port used for rdma is 24008. Similarly, brick processes will also listen on two ports for a volume created with transport \"tcp,rdma\".

          Make sure you update the firewall to accept packets on these ports.
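
          For example, with iptables, and assuming a five-brick volume using the default brick port range (adjust the ports to your deployment):

          sudo iptables -A INPUT -m state --state NEW -p tcp --dport 24007:24008 -j ACCEPT\nsudo iptables -A INPUT -m state --state NEW -p tcp --dport 49152:49156 -j ACCEPT\n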

          "},{"location":"Administrator-Guide/RDMA-Transport/#gluster-volume-create","title":"Gluster Volume Create","text":"

          A volume can support one or more transport types for communication between clients and brick processes. There are three supported transport types: tcp, rdma, and tcp,rdma.

          Example: To create a distributed volume with four storage servers over InfiniBand:

          # gluster volume create test-volume transport rdma server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4\nCreation of test-volume has been successful\nPlease start the volume to access data.\n
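
          Similarly, a volume that accepts both transports could be created like this (server and brick names are placeholders):

          # gluster volume create test-volume transport tcp,rdma server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4\n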

          "},{"location":"Administrator-Guide/RDMA-Transport/#changing-transport-of-volume","title":"Changing Transport of Volume","text":"

          To change the supported transport types of an existing volume, follow this procedure: NOTE: This is possible only if the volume was created with the IP/hostname assigned to the RDMA device.

          1. Unmount the volume on all the clients using the following command:

            umount mount-point\n
          2. Stop the volumes using the following command:

            gluster volume stop volname\n
          3. Change the transport type. For example, to enable both tcp and rdma execute the following command:

            gluster volume set volname config.transport tcp,rdma\n
          4. Mount the volume on all the clients. For example, to mount using rdma transport, use the following command:

            mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs\n

          NOTE: The config.transport option does not have an entry in the help output of the gluster CLI.

          gluster vol set help | grep config.transport\n

          However, the key is a valid one.
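
          You can confirm the transport types currently configured for a volume with:

          gluster volume info volname | grep -i transport\n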

          "},{"location":"Administrator-Guide/RDMA-Transport/#mounting-a-volume-using-rdma","title":"Mounting a Volume using RDMA","text":"

          You can use the mount option \"transport\" to specify the transport type that FUSE client must use to communicate with bricks. If the volume was created with only one transport type, then that becomes the default when no value is specified. In case of tcp,rdma volume, tcp is the default.

          For example, to mount using rdma transport, use the following command:

          mount -t glusterfs -o transport=rdma server1:/test-volume /mnt/glusterfs\n
          "},{"location":"Administrator-Guide/RDMA-Transport/#transport-used-by-auxillary-processes","title":"Transport used by auxillary processes","text":"

          All the auxiliary processes like the self-heal daemon, rebalance process etc. use the default transport. In case you have a tcp,rdma volume it will use tcp. In case of an rdma volume, rdma will be used. Configuration options to select the transport used by these processes when the volume is tcp,rdma are not yet available and will be coming in later releases.

          "},{"location":"Administrator-Guide/SSL/","title":"Setting up GlusterFS with SSL/TLS","text":"

          GlusterFS allows its communication to be secured using the Transport Layer Security standard (which supersedes Secure Sockets Layer), using the OpenSSL library. Setting this up requires a basic working knowledge of some SSL/TLS concepts, which can only be briefly summarized here.

          • \"Authentication\" is the process of one entity (e.g. a machine, process, or person) proving its identity to a second entity.

          • \"Authorization\" is the process of checking whether an entity has permission to perform an action.

          • TLS provides authentication and encryption. It does not provide authorization, though GlusterFS can use TLS-authenticated identities to authorize client connections to bricks/volumes.

          • An entity X which must authenticate to a second entity Y does so by sharing with Y a certificate, which contains information sufficient to prove X's identity. X's proof of identity also requires possession of a private key which matches its certificate, but this key is never seen by Y or anyone else. Because the certificate itself is public, anyone who obtains the matching private key can claim that identity, which is why the key must be kept secret.

          • Each certificate contains the identity of its principal (owner) along with the identity of a certifying authority or CA who can verify the integrity of the certificate's contents. The principal and CA can be the same (a \"self-signed certificate\"). If they are different, the CA must sign the certificate by appending information derived from both the certificate contents and the CA's own private key.

          • Certificate-signing relationships can extend through multiple levels. For example, a company X could sign another company Y's certificate, which could then be used to sign a third certificate Z for a specific user or purpose. Anyone who trusts X (and is willing to extend that trust through a certificate depth of two or more) would therefore be able to authenticate Y and Z as well.

          • Any entity willing to accept other entities' authentication attempts must have some sort of database seeded with the certificates that it already accepts.

          In GlusterFS's case, a client or server X uses the following files to contain TLS-related information:

          • /etc/ssl/glusterfs.pem X's own certificate

          • /etc/ssl/glusterfs.key X's private key

          • /etc/ssl/glusterfs.ca concatenation of others' certificates

          GlusterFS always performs mutual authentication, though clients do not currently do anything with the authenticated server identity. Thus, if client X wants to communicate with server Y, then X's certificate (or that of a signer) must be in Y's CA file, and vice versa.
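
          As a minimal sketch of a self-signed setup (the CN value and the per-machine .pem file names below are only placeholders), each machine generates its own key and certificate, and every machine's CA file is seeded with the certificates of the peers it should accept:

          # on each machine: generate a private key and a self-signed certificate\nopenssl genrsa -out /etc/ssl/glusterfs.key 2048\nopenssl req -new -x509 -key /etc/ssl/glusterfs.key -subj /CN=server1 -days 365 -out /etc/ssl/glusterfs.pem\n# on each machine: concatenate the peers' certificates (copied over out of band) into the CA file\ncat server1.pem server2.pem client1.pem > /etc/ssl/glusterfs.ca\n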

          For all uses of TLS in GlusterFS, if one side of a connection is configured to use TLS then the other side must use it as well. There is no automatic fallback to non-TLS communication, or allowance for concurrent TLS and non-TLS access to the same resource, because either would be insecure. Instead, any such \"mixed mode\" connections will be rejected by the TLS-using side, sacrificing availability to maintain security.

          NOTE: TLS certificate verification will fail if the machines' date and time are not in sync with each other. Certificate verification depends on the time of the client as well as the server, and if they are not in sync the certificate is deemed invalid. To get the date and time in sync, tools such as ntpdate can be used.

          "},{"location":"Administrator-Guide/SSL/#using-certmonger-and-freeipa-to-generate-and-manage-certs","title":"Using Certmonger and FreeIPA to generate and manage certs","text":"

          Certmonger can be used to generate keys, request certs from a CA and then automatically keep the Gluster certificate and the CA bundle updated as required, simplifying deployment. Either a commercial CA or a local CA can be used. E.g., FreeIPA (with dogtag CA) is an open-source CA with user-friendly tooling.

          If using FreeIPA, first add the host. This is required for FreeIPA to issue certificates. This can be done via the web UI, or the CLI with:

          ipa host-add <hostname>\n

          If the host has been added the following should show the host:

          ipa host-show <hostname>\n

          And it should show a kerberos principal for the host in the form of:

          host/<hostname>\n

          Now use certmonger on the gluster server or client to generate the key (if required), and submit a CSR to the CA. Certmonger will monitor the request, and create and update the files as required. For FreeIPA we need to specify the Kerberos principal from above to -K. E.g.:

           getcert request -r  \\\n    -K host/$(hostname)  \\\n    -f /etc/ssl/gluster.pem \\\n    -k /etc/ssl/gluster.key \\\n    -D $(hostname)  \\\n    -F /etc/ssl/gluster.ca\n

          Certmonger should print out an ID for the request, e.g.:

          New signing request \"20210801190305\" added.\n

          You can check the status of the request with this ID:

          getcert list -i 20210801190305\n

          If the CA approves the CSR and issues the cert, then the previous command should print a status field with:

          status: MONITORING\n

          As this point, the key, the cert and the CA bundle should all be in /etc/ssl ready for Gluster to use. Certmonger will renew the certificates as required for you.

          You do not need to manually concatenate certs to a trusted cert bundle and distribute them to all servers.

          You may need to set the certificate depth to allow the CA signed certs to be used, if there are intermediate CAs in the signing path. E.g., on every server and client:

          echo \"option transport.socket.ssl-cert-depth 3\" >  /var/lib/glusterd/secure-access\n

          This should not be necessary where a local CA (e.g., FreeIPA) has directly signed the cert.

          "},{"location":"Administrator-Guide/SSL/#enabling-tls-on-the-io-path","title":"Enabling TLS on the I/O Path","text":"

          To enable authentication and encryption between clients and brick servers, two options must be set:

          gluster volume set MYVOLUME client.ssl on\ngluster volume set MYVOLUME server.ssl on\n

          Note that the above options affect only the GlusterFS native protocol. For foreign protocols such as NFS, SMB, or Swift, the traffic will not be encrypted between:

          1. NFS client and Glusterfs NFS Ganesha Server
          2. SMB client and Glusterfs SMB server

          However, it does affect the encryption between the following:

          1. NFS Ganesha server and Glusterfs bricks
          2. Glusterfs SMB server and Glusterfs bricks
          "},{"location":"Administrator-Guide/SSL/#using-tls-identities-for-authorization","title":"Using TLS Identities for Authorization","text":"

          Once TLS has been enabled on the I/O path, TLS identities can be used instead of IP addresses or plain usernames to control access to specific volumes. For example:

          gluster volume set MYVOLUME auth.ssl-allow Zaphod\n

          Here, we're allowing the TLS-authenticated identity \"Zaphod\" to access MYVOLUME. This is intentionally identical to the existing \"auth.allow\" option, except that the name is taken from a TLS certificate instead of a command-line string. Note that infelicities in the gluster CLI preclude using names that include spaces, which would otherwise be allowed.
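
          To allow more than one identity, the option takes a comma-separated list of names (the names below are illustrative):

          gluster volume set MYVOLUME auth.ssl-allow Zaphod,Trillian\n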

          "},{"location":"Administrator-Guide/SSL/#enabling-tls-on-the-management-path","title":"Enabling TLS on the Management Path","text":"

          Management-daemon traffic is not controlled by an option. Instead, it is controlled by the presence of a file on each machine:

          /var/lib/glusterd/secure-access\n

          Creating this file will cause glusterd connections made from that machine to use TLS. Note that even clients must do this to communicate with a remote glusterd while mounting, but not thereafter.
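
          A minimal sketch, assuming a systemd-based distribution (run on every server, and on any client that needs to talk to a remote glusterd while mounting):

          touch /var/lib/glusterd/secure-access\nsystemctl restart glusterd   # on servers, so glusterd picks up the change\n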

          "},{"location":"Administrator-Guide/SSL/#additional-options","title":"Additional Options","text":"

          The GlusterFS TLS implementation supports two additional options related to TLS internals.

          The first option allows the user to set the certificate depth, as mentioned above.

          gluster volume set MYVOLUME ssl.certificate-depth 2\n

          Here, we're setting our certificate depth to two, as in the introductory example. By default this value is zero, meaning that only certificates which are directly specified in the local CA file will be accepted (i.e. no signed certificates at all).

          The second option allows the user to specify the set of allowed TLS ciphers.

          gluster volume set MYVOLUME ssl.cipher-list 'HIGH:!SSLv2'\n

          Cipher lists are negotiated between the two parties to a TLS connection so that both sides' security needs are satisfied. In this example, we're setting the initial cipher list to HIGH, representing ciphers that the cryptography community still believes to be unbroken. We are also explicitly disallowing ciphers specific to SSL version 2. The default is based on this example but also excludes CBC-based cipher modes to provide extra mitigation against the POODLE attack.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/","title":"Accessing Data - Setting Up GlusterFS Client","text":"

          You can access gluster volumes in multiple ways. You can use the Gluster Native Client method for high concurrency, performance and transparent failover in GNU/Linux clients. You can also use NFS v3 to access gluster volumes. Extensive testing has been done on GNU/Linux clients and on NFS implementations in other operating systems, such as FreeBSD, Mac OS X, Windows 7 (Professional and up) and Windows Server 2003. Other NFS client implementations may work with the gluster NFS server.

          You can use CIFS to access volumes when using Microsoft Windows as well as SAMBA clients. For this access method, Samba packages need to be present on the client side.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#gluster-native-client","title":"Gluster Native Client","text":"

          The Gluster Native Client is a FUSE-based client running in user space. Gluster Native Client is the recommended method for accessing volumes when high concurrency and high write performance is required.

          This section introduces the Gluster Native Client and explains how to install the software on client machines. This section also describes how to mount volumes on clients (both manually and automatically) and how to verify that the volume has mounted successfully.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#installing-the-gluster-native-client","title":"Installing the Gluster Native Client","text":"

          Before you begin installing the Gluster Native Client, you need to verify that the FUSE module is loaded on the client and has access to the required modules as follows:

          1. Add the FUSE loadable kernel module (LKM) to the Linux kernel:

            modprobe fuse\n
          2. Verify that the FUSE module is loaded:

            # dmesg | grep -i fuse\nfuse init (API version 7.13)\n
          "},{"location":"Administrator-Guide/Setting-Up-Clients/#installing-on-red-hat-package-manager-rpm-distributions","title":"Installing on Red Hat Package Manager (RPM) Distributions","text":"

          To install Gluster Native Client on RPM distribution-based systems

          1. Install required prerequisites on the client using the following command:

            sudo yum -y install openssh-server wget fuse fuse-libs openib libibverbs\n
          2. Ensure that TCP and UDP ports 24007 and 24008 are open on all Gluster servers. Apart from these ports, you need to open one port for each brick starting from port 49152 (instead of 24009 onwards as with previous releases). The brick ports assignment scheme is now compliant with IANA guidelines. For example: if you have five bricks, you need to have ports 49152 to 49156 open.

            From Gluster-10 onwards, the brick ports will be randomized. A port is randomly selected within the range of base-port to max-port as defined in the glusterd.vol file and then assigned to the brick. For example: if you have five bricks, you need to have at least 5 ports open within the given range of base-port and max-port. To reduce the number of open ports (for best security practices), one can lower the max-port value in the glusterd.vol file and restart glusterd to get it into effect.

            You can use the following chains with iptables:

            sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24008 -j ACCEPT\n\nsudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49156 -j ACCEPT\n

            Note: If you already have iptables chains, make sure that the above ACCEPT rules precede the DROP rules. This can be achieved by providing a lower rule number than the DROP rule.

          3. Download the latest glusterfs, glusterfs-fuse, and glusterfs-rdma RPM files to each client. The glusterfs package contains the Gluster Native Client. The glusterfs-fuse package contains the FUSE translator required for mounting on client systems and the glusterfs-rdma packages contain OpenFabrics verbs RDMA module for Infiniband.

            You can download the software at GlusterFS download page.

          4. Install Gluster Native Client on the client.

            Note The package versions listed in the example below may not be the latest release. Please refer to the download page to ensure that you have the recently released packages.

            sudo rpm -i glusterfs-3.8.5-1.x86_64\nsudo rpm -i glusterfs-fuse-3.8.5-1.x86_64\nsudo rpm -i glusterfs-rdma-3.8.5-1.x86_64\n

          Note: The RDMA module is only required when using Infiniband.
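
          If your distribution ships GlusterFS packages in its repositories (for example via the CentOS Storage SIG), installing through the package manager may be simpler than handling individual RPM files; a sketch:

          sudo yum -y install glusterfs glusterfs-fuse\n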

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#installing-on-debian-based-distributions","title":"Installing on Debian-based Distributions","text":"

          To install Gluster Native Client on Debian-based distributions

          1. Install OpenSSH Server on each client using the following command:

            sudo apt-get install openssh-server vim wget\n
          2. Download the latest GlusterFS .deb file and checksum to each client.

            You can download the software at GlusterFS download page.

          3. For each .deb file, get the checksum (using the following command) and compare it against the checksum for that file in the md5sum file.

            md5sum GlusterFS_DEB_file.deb\n

            The md5sum of the packages is available at: GlusterFS download page

          4. Uninstall GlusterFS v3.1 (or an earlier version) from the client using the following command:

            sudo dpkg -r glusterfs\n

            (Optional) Run sudo dpkg --purge glusterfs to purge the configuration files.

          5. Install Gluster Native Client on the client using the following command:

            sudo dpkg -i GlusterFS_DEB_file\n

            For example:

            sudo dpkg -i glusterfs-3.8.x.deb\n
          6. Ensure that TCP and UDP ports 24007 and 24008 are open on all Gluster servers. Apart from these ports, you need to open one port for each brick starting from port 49152 (instead of 24009 onwards as with previous releases). The brick ports assignment scheme is now compliant with IANA guidelines. For example: if you have five bricks, you need to have ports 49152 to 49156 open.

            From Gluster-10 onwards, the brick ports will be randomized. A port is randomly selected within the range of base_port to max_port as defined in glusterd.vol file and then assigned to the brick. For example: if you have five bricks, you need to have at least 5 ports open within the given range of base_port and max_port. To reduce the number of open ports (for best security practices), one can lower the max_port value in the glusterd.vol file and restart glusterd to get it into effect.

            You can use the following chains with iptables:

            sudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 24007:24008 -j ACCEPT\n\nsudo iptables -A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 49152:49156 -j ACCEPT\n

          Note: If you already have iptables chains, make sure that the above ACCEPT rules precede the DROP rules. This can be achieved by providing a lower rule number than the DROP rule.
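
          On recent Debian/Ubuntu releases, the client packages may also be available directly from the distribution repositories; a sketch:

          sudo apt-get install glusterfs-client\n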

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#performing-a-source-installation","title":"Performing a Source Installation","text":"

          To build and install Gluster Native Client from the source code

          1. Create a new directory using the following commands:

            mkdir glusterfs\ncd glusterfs\n
          2. Download the source code.

            You can download the source at link.

          3. Extract the source code using the following command:

            tar -xvzf SOURCE-FILE\n
          4. Run the configuration utility using the following command:

            $ ./configure\n\nGlusterFS configure summary\n===========================\nFUSE client : yes\nInfiniband verbs : yes\nepoll IO multiplex : yes\nargp-standalone : no\nfusermount : no\nreadline : yes\n

            The configuration summary shows the components that will be built with Gluster Native Client.

          5. Build the Gluster Native Client software using the following commands:

            make\nmake install\n
          6. Verify that the correct version of Gluster Native Client is installed, using the following command:

            glusterfs --version\n
          "},{"location":"Administrator-Guide/Setting-Up-Clients/#mounting-volumes","title":"Mounting Volumes","text":"

          After installing the Gluster Native Client, you need to mount Gluster volumes to access data. There are two methods you can choose:

          • Manually Mounting Volumes
          • Automatically Mounting Volumes

          Note: Server names selected during creation of volumes should be resolvable on the client machine. You can use appropriate /etc/hosts entries or a DNS server to resolve server names to IP addresses.
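
          For example, a minimal set of /etc/hosts entries on the client could look like this (addresses and names are placeholders):

          192.168.1.101  server1\n192.168.1.102  server2\n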

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#manually-mounting-volumes","title":"Manually Mounting Volumes","text":"
          • To mount a volume, use the following command:
            mount -t glusterfs HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR\n

          For example:

              mount -t glusterfs server1:/test-volume /mnt/glusterfs\n

          Note The server specified in the mount command is only used to fetch the gluster configuration volfile describing the volume name. Subsequently, the client will communicate directly with the servers mentioned in the volfile (which might not even include the one used for mount).

          If you see a usage message like \"Usage: mount.glusterfs\", it usually means the mount point directory does not yet exist. Run \"mkdir /mnt/glusterfs\" before you attempt to run the mount command listed above.

          Mounting Options

          You can specify the following options when using the mount -t glusterfs command. Note that you need to separate all options with commas.

          backupvolfile-server=server-name\n\nvolfile-max-fetch-attempts=number of attempts\n\nlog-level=loglevel\n\nlog-file=logfile\n\ntransport=transport-type\n\ndirect-io-mode=[enable|disable]\n\nuse-readdirp=[yes|no]\n\n

          For example:

          mount -t glusterfs -o backupvolfile-server=volfile_server2,use-readdirp=no,volfile-max-fetch-attempts=2,log-level=WARNING,log-file=/var/log/gluster.log server1:/test-volume /mnt/glusterfs

          If the backupvolfile-server option is added while mounting the fuse client, then when the first volfile server fails, the server specified in the backupvolfile-server option is used as the volfile server to mount the client.

          With the volfile-max-fetch-attempts=X option, specify the number of attempts to fetch volume files while mounting a volume. This option is useful when you mount a server with multiple IP addresses or when round-robin DNS is configured for the server name.

          If use-readdirp is set to yes, it forces the use of readdirp mode in the FUSE kernel module.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#automatically-mounting-volumes","title":"Automatically Mounting Volumes","text":"

          You can configure your system to automatically mount the Gluster volume each time your system starts.

          The server specified in the mount command is only used to fetch the gluster configuration volfile describing the volume name. Subsequently, the client will communicate directly with the servers mentioned in the volfile (which might not even include the one used for mount).

          • To mount a volume, edit the /etc/fstab file and add the following line:

          HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR glusterfs defaults,_netdev 0 0

          For example:

          server1:/test-volume /mnt/glusterfs glusterfs defaults,_netdev 0 0
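
          After editing /etc/fstab, you can verify the entry without rebooting:

          sudo mount -a\ndf -h /mnt/glusterfs\n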

          Mounting Options

          You can specify the following options when updating the /etc/fstab file. Note that you need to separate all options with commas.

          log-level=loglevel\n\nlog-file=logfile\n\ntransport=transport-type\n\ndirect-io-mode=[enable|disable]\n\nuse-readdirp=no\n

          For example:

          HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR glusterfs defaults,_netdev,log-level=WARNING,log-file=/var/log/gluster.log 0 0

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#testing-mounted-volumes","title":"Testing Mounted Volumes","text":"

          To test mounted volumes

          • Use the following command:

          # mount

          If the gluster volume was successfully mounted, the output of the mount command on the client will be similar to this example:

          server1:/test-volume on /mnt/glusterfs type fuse.glusterfs (rw,allow_other,default_permissions,max_read=131072)

          • Use the following command:

          # df

          The output of df command on the client will display the aggregated storage space from all the bricks in a volume similar to this example:

            # df -h /mnt/glusterfs\n  Filesystem               Size Used Avail Use% Mounted on\n  server1:/test-volume     28T 22T 5.4T 82% /mnt/glusterfs\n
          • Change to the directory and list the contents by entering the following:
              # cd MOUNTDIR\n    # ls\n
          • For example,
              # cd /mnt/glusterfs\n    # ls\n
          "},{"location":"Administrator-Guide/Setting-Up-Clients/#nfs","title":"NFS","text":"

          You can use NFS v3 to access gluster volumes. Extensive testing has been done on GNU/Linux clients. NFS implementations in other operating systems, such as FreeBSD, Mac OS X, Windows 7 (Professional and up), Windows Server 2003, and others, may work with the gluster NFS server implementation.

          GlusterFS now includes network lock manager (NLM) v4. NLM enables applications on NFSv3 clients to do record locking on files on NFS server. It is started automatically whenever the NFS server is run.

          You must install the nfs-common package on both servers and clients (only for Debian-based distributions).

          This section describes how to use NFS to mount Gluster volumes (both manually and automatically) and how to verify that the volume has been mounted successfully.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#using-nfs-to-mount-volumes","title":"Using NFS to Mount Volumes","text":"

          You can use either of the following methods to mount Gluster volumes:

          • Manually Mounting Volumes Using NFS
          • Automatically Mounting Volumes Using NFS

          Prerequisite: Install nfs-common package on both servers and clients (only for Debian-based distribution), using the following command:

              sudo aptitude install nfs-common\n

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#manually-mounting-volumes-using-nfs","title":"Manually Mounting Volumes Using NFS","text":"

          To manually mount a Gluster volume using NFS

          • To mount a volume, use the following command:
            mount -t nfs -o vers=3 HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR\n

          For example:

             mount -t nfs -o vers=3 server1:/test-volume /mnt/glusterfs\n

          Note Gluster NFS server does not support UDP. If the NFS client you are using defaults to connecting using UDP, the following message appears:

          requested NFS version or transport protocol is not supported.

          To connect using TCP

          • Add the following option to the mount command:

          -o mountproto=tcp

          For example:

              mount -o mountproto=tcp -t nfs server1:/test-volume /mnt/glusterfs\n

          To mount Gluster NFS server from a Solaris client

          • Use the following command:
            mount -o proto=tcp,vers=3 nfs://HOSTNAME-OR-IPADDRESS:38467/VOLNAME MOUNTDIR\n

          For example:

              mount -o proto=tcp,vers=3 nfs://server1:38467/test-volume /mnt/glusterfs\n

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#automatically-mounting-volumes-using-nfs","title":"Automatically Mounting Volumes Using NFS","text":"

          You can configure your system to automatically mount Gluster volumes using NFS each time the system starts.

          To automatically mount a Gluster volume using NFS

          • To mount a volume, edit the /etc/fstab file and add the following line:
            HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR nfs defaults,_netdev,vers=3 0 0\n

          For example,

          server1:/test-volume /mnt/glusterfs nfs defaults,_netdev,vers=3 0 0

          Note Gluster NFS server does not support UDP. If the NFS client you are using defaults to connecting using UDP, the following message appears:

          requested NFS version or transport protocol is not supported.

          To connect using TCP

          • Add the following entry in /etc/fstab file :
            HOSTNAME-OR-IPADDRESS:/VOLNAME MOUNTDIR nfs defaults,_netdev,mountproto=tcp 0 0\n

          For example,

          server1:/test-volume /mnt/glusterfs nfs defaults,_netdev,mountproto=tcp 0 0

          To automount NFS mounts

          Gluster supports the standard *nix method of automounting NFS mounts. Update /etc/auto.master and /etc/auto.misc and restart the autofs service. After that, whenever a user or process attempts to access the directory it will be mounted in the background.
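
          A minimal direct-map sketch, reusing the volume and mount point from the earlier examples, could look like this; restart the autofs service afterwards:

          # /etc/auto.master\n/-  /etc/auto.misc\n\n# /etc/auto.misc\n/mnt/glusterfs  -fstype=nfs,vers=3,mountproto=tcp  server1:/test-volume\n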

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#testing-volumes-mounted-using-nfs","title":"Testing Volumes Mounted Using NFS","text":"

          You can confirm that Gluster directories are mounting successfully.

          To test mounted volumes

          • Use the mount command by entering the following:

          # mount

          For example, the output of the mount command on the client will display an entry like the following:

          server1:/test-volume on /mnt/glusterfs type nfs (rw,vers=3,addr=server1)

          • Use the df command by entering the following:

          # df

          For example, the output of df command on the client will display the aggregated storage space from all the bricks in a volume.

            # df -h /mnt/glusterfs\n  Filesystem              Size Used Avail Use% Mounted on\n  server1:/test-volume    28T  22T  5.4T  82%  /mnt/glusterfs\n
          • Change to the directory and list the contents by entering the following:

          # cd MOUNTDIR

          # ls

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#cifs","title":"CIFS","text":"

          You can use CIFS to access volumes when using Microsoft Windows as well as SAMBA clients. For this access method, Samba packages need to be present on the client side. You can export the glusterfs mount point as a samba export, and then mount it using the CIFS protocol.

          This section describes how to mount CIFS shares on Microsoft Windows-based clients (both manually and automatically) and how to verify that the volume has mounted successfully.

          Note

          CIFS access using the Mac OS X Finder is not supported, however, you can use the Mac OS X command line to access Gluster volumes using CIFS.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#using-cifs-to-mount-volumes","title":"Using CIFS to Mount Volumes","text":"

          You can use either of the following methods to mount Gluster volumes:

          • Exporting Gluster Volumes Through Samba
          • Manually Mounting Volumes Using CIFS
          • Automatically Mounting Volumes Using CIFS

          You can also use Samba for exporting Gluster Volumes through CIFS protocol.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#exporting-gluster-volumes-through-samba","title":"Exporting Gluster Volumes Through Samba","text":"

          We recommend using Samba for exporting Gluster volumes through the CIFS protocol.

          To export volumes through CIFS protocol

          1. Mount a Gluster volume.

          2. Setup Samba configuration to export the mount point of the Gluster volume.

            For example, if a Gluster volume is mounted on /mnt/gluster, you must edit smb.conf file to enable exporting this through CIFS. Open smb.conf file in an editor and add the following lines for a simple configuration:

              [glustertest]\n\n    comment = For testing a Gluster volume exported through CIFS\n\n    path = /mnt/glusterfs\n\n    read only = no\n\n    guest ok = yes\n

          Save the changes and start the smb service using your system's init scripts (/etc/init.d/smb [re]start). The above steps are needed if you are doing multiple mounts. If you want only a Samba mount, then you need to add the following to your smb.conf:

              kernel share modes = no\n    kernel oplocks = no\n    map archive = no\n    map hidden = no\n    map read only = no\n    map system = no\n    store dos attributes = yes\n

          Note

          To be able to mount from any server in the trusted storage pool, you must repeat these steps on each Gluster node. For more advanced configurations, see the Samba documentation.
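
          To verify the export from one of the Gluster nodes, you could list the shares anonymously (this assumes guest access is enabled as in the configuration above):

          smbclient -L localhost -N\n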

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#manually-mounting-volumes-using-cifs","title":"Manually Mounting Volumes Using CIFS","text":"

          You can manually mount Gluster volumes using CIFS on Microsoft Windows-based client machines.

          To manually mount a Gluster volume using CIFS

          1. Using Windows Explorer, choose Tools > Map Network Drive\u2026 from the menu. The Map Network Drive window appears.

          2. Choose the drive letter using the Drive drop-down list.

          3. Click Browse, select the volume to map to the network drive, and click OK.

          4. Click Finish.

          The network drive (mapped to the volume) appears in the Computer window.

          Alternatively, you can manually mount a Gluster volume using CIFS by going to Start > Run and entering the network path manually.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#automatically-mounting-volumes-using-cifs","title":"Automatically Mounting Volumes Using CIFS","text":"

          You can configure your system to automatically mount Gluster volumes using CIFS on Microsoft Windows-based clients each time the system starts.

          To automatically mount a Gluster volume using CIFS

          The network drive (mapped to the volume) appears in the Computer window and is reconnected each time the system starts.

          1. Using Windows Explorer, choose Tools > Map Network Drive\u2026 from the menu. The Map Network Drive window appears.

          2. Choose the drive letter using the Drive drop-down list.

          3. Click Browse, select the volume to map to the network drive, and click OK.

          4. Click the Reconnect at logon checkbox.

          5. Click Finish.

          "},{"location":"Administrator-Guide/Setting-Up-Clients/#testing-volumes-mounted-using-cifs","title":"Testing Volumes Mounted Using CIFS","text":"

          You can confirm that Gluster directories are mounting successfully by navigating to the directory using Windows Explorer.

          "},{"location":"Administrator-Guide/Setting-Up-Volumes/","title":"Setting up GlusterFS Volumes","text":"

          A volume is a logical collection of bricks where each brick is an export directory on a server in the trusted storage pool. To create a new volume in your storage environment, specify the bricks that comprise the volume. After you have created a new volume, you must start it before attempting to mount it.

          See Setting up Storage for how to set up bricks.

          "},{"location":"Administrator-Guide/Setting-Up-Volumes/#volume-types","title":"Volume Types","text":"
          • Volumes of the following types can be created in your storage environment:

            • Distributed - Distributed volumes distribute files across the bricks in the volume. You can use distributed volumes where the requirement is to scale storage and the redundancy is either not important or is provided by other hardware/software layers.

            • Replicated \u2013 Replicated volumes replicate files across bricks in the volume. You can use replicated volumes in environments where high-availability and high-reliability are critical.

            • Distributed Replicated - Distributed replicated volumes distribute files across replicated bricks in the volume. You can use distributed replicated volumes in environments where the requirement is to scale storage and high-reliability is critical. Distributed replicated volumes also offer improved read performance in most environments.

            • Dispersed - Dispersed volumes are based on erasure codes, providing space-efficient protection against disk or server failures. It stores an encoded fragment of the original file to each brick in a way that only a subset of the fragments is needed to recover the original file. The number of bricks that can be missing without losing access to data is configured by the administrator on volume creation time.

            • Distributed Dispersed - Distributed dispersed volumes distribute files across dispersed subvolumes. This has the same advantages as distributed replicated volumes, but uses disperse to store the data in the bricks.

          To create a new volume

          • Create a new volume :

            # gluster volume create <NEW-VOLNAME> [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport <tcp>] <NEW-BRICK> <TA-BRICK>... [force]

            For example, to create a volume called test-volume consisting of server3:/exp3 and server4:/exp4:

            # gluster volume create test-volume server3:/exp3 server4:/exp4\nCreation of test-volume has been successful\nPlease start the volume to access data.\n

            tcp is the default and currently the only available transport.
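
            Once created, the volume must be started before clients can mount it; a quick check afterwards could look like this:

            # gluster volume start test-volume\n# gluster volume info test-volume\n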

          "},{"location":"Administrator-Guide/Setting-Up-Volumes/#creating-distributed-volumes","title":"Creating Distributed Volumes","text":"

          In a distributed volume files are spread randomly across the bricks in the volume. Use distributed volumes where you need to scale storage and redundancy is either not important or is provided by other hardware/software layers.

          Note: Disk/server failure in distributed volumes can result in a serious loss of data because directory contents are spread randomly across the bricks in the volume.

          To create a distributed volume

          1. Create a trusted storage pool.

          2. Create the distributed volume:

            # gluster volume create <NEW-VOLNAME> [transport tcp] <NEW-BRICK>...

            For example, to create a distributed volume with four storage servers using tcp:

            # gluster volume create test-volume server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4\nCreation of test-volume has been successful\nPlease start the volume to access data.\n

            (Optional) You can display the volume information:

            # gluster volume info\nVolume Name: test-volume\nType: Distribute\nStatus: Created\nNumber of Bricks: 4\nTransport-type: tcp\nBricks:\nBrick1: server1:/exp1\nBrick2: server2:/exp2\nBrick3: server3:/exp3\nBrick4: server4:/exp4\n

            For example, to create a distributed volume with four storage servers, specifying the transport type explicitly:

            # gluster volume create test-volume transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4\nCreation of test-volume has been successful\nPlease start the volume to access data.\n

            If the transport type is not specified, tcp is used as the default. You can also set additional options if required, such as auth.allow or auth.reject.
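
            For instance, to restrict access to a specific subnet using the auth.allow option (a hedged illustration; 192.168.1.* is a hypothetical address pattern):

            # gluster volume set test-volume auth.allow 192.168.1.*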

            Note: Make sure you start your volumes before you try to mount them or else client operations after the mount will hang.

          "},{"location":"Administrator-Guide/Setting-Up-Volumes/#creating-replicated-volumes","title":"Creating Replicated Volumes","text":"

          Replicated volumes create copies of files across multiple bricks in the volume. You can use replicated volumes in environments where high-availability and high-reliability are critical.

          Note: The number of bricks should be equal to the replica count for a replicated volume. To protect against server and disk failures, it is recommended that the bricks of the volume are from different servers.

          To create a replicated volume

          1. Create a trusted storage pool.

          2. Create the replicated volume:

            # gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp] <NEW-BRICK>...

            For example, to create a replicated volume with two storage servers:

            # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2\nCreation of test-volume has been successful\nPlease start the volume to access data.\n

            If the transport type is not specified, tcp is used as the default. You can also set additional options if required, such as auth.allow or auth.reject.

            Note:

            • Make sure you start your volumes before you try to mount them or else client operations after the mount will hang.

            • GlusterFS will fail to create a replicated volume if more than one brick of a replica set is present on the same peer. For example, consider a four-node replicated volume where more than one brick of a replica set is present on the same peer:

              # gluster volume create <volname> replica 4 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4\nvolume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal. Use 'force' at the end of the command if you want to override this behavior.\n

            Use the force option at the end of the command if you still want to create the volume with this configuration, as shown below.
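
            For example, a sketch of the override, reusing the hypothetical bricks from the error message above (note that placing replica-set members on the same server is not recommended):

            # gluster volume create <volname> replica 4 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4 force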

          "},{"location":"Administrator-Guide/Setting-Up-Volumes/#arbiter-configuration-for-replica-volumes","title":"Arbiter configuration for replica volumes","text":"

          Arbiter volumes are replica 3 volumes where the 3rd brick acts as the arbiter brick. This configuration has mechanisms that prevent the occurrence of split-brain.

          It can be created with the following command:

          # gluster volume create <VOLNAME> replica 2 arbiter 1 host1:brick1 host2:brick2 host3:brick3\n

          More information about this configuration can be found at Administrator-Guide : arbiter-volumes-and-quorum

          Note that the arbiter configuration for replica 3 can be used to create distributed-replicate volumes as well.

          "},{"location":"Administrator-Guide/Setting-Up-Volumes/#creating-distributed-replicated-volumes","title":"Creating Distributed Replicated Volumes","text":"

          Distributed replicated volumes distribute files across replicated bricks in the volume. You can use distributed replicated volumes in environments where the requirement is to scale storage and high-reliability is critical. Distributed replicated volumes also offer improved read performance in most environments.

          Note: The number of bricks should be a multiple of the replica count for a distributed replicated volume. Also, the order in which bricks are specified has a great effect on data protection. Each replica_count consecutive bricks in the list you give will form a replica set, with all replica sets combined into a volume-wide distribute set. To make sure that replica-set members are not placed on the same node, list the first brick on every server, then the second brick on every server in the same order, and so on.
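
          For example, a hedged sketch of this ordering rule for a replica 2 volume with six bricks spread over three hypothetical servers (two bricks per server): listing the first brick of every server before the second ensures that no replica set ends up with both members on the same node.

            # gluster volume create dr-volume replica 2 server1:/bricks/b1 server2:/bricks/b1 server3:/bricks/b1 server1:/bricks/b2 server2:/bricks/b2 server3:/bricks/b2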

          To create a distributed replicated volume

          1. Create a trusted storage pool.

          2. Create the distributed replicated volume:

            # gluster volume create <NEW-VOLNAME> [replica <COUNT>] [transport tcp] <NEW-BRICK>...

            For example, to create a four-node distributed (replicated) volume with a two-way mirror:

            # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4\nCreation of test-volume has been successful\nPlease start the volume to access data.\n

            For example, to create a six node distributed (replicated) volume with a two-way mirror:

            # gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6\nCreation of test-volume has been successful\nPlease start the volume to access data.\n

            If the transport type is not specified, tcp is used as the default. You can also set additional options if required, such as auth.allow or auth.reject.

            Note:

            • Make sure you start your volumes before you try to mount them or else client operations after the mount will hang.

            • GlusterFS will fail to create a distributed replicated volume if more than one brick of a replica set is present on the same peer. For example, consider a four-node distributed (replicated) volume where more than one brick of a replica set is present on the same peer:
              # gluster volume create <volname> replica 2 server1:/brick1 server1:/brick2 server2:/brick3 server4:/brick4\nvolume create: <volname>: failed: Multiple bricks of a replicate volume are present on the same server. This setup is not optimal. Use 'force' at the end of the command if you want to override this behavior.\n

            Use the force option at the end of the command if you still want to create the volume in this case.

          "},{"location":"Administrator-Guide/Setting-Up-Volumes/#creating-dispersed-volumes","title":"Creating Dispersed Volumes","text":"

          Dispersed volumes are based on erasure codes. They stripe the encoded data of files, with some redundancy added, across multiple bricks in the volume. You can use dispersed volumes to get a configurable level of reliability with minimum space waste.

          Redundancy

          Each dispersed volume has a redundancy value defined when the volume is created. This value determines how many bricks can be lost without interrupting the operation of the volume. It also determines the amount of usable space of the volume using this formula:

          <Usable size> = <Brick size> * (#Bricks - Redundancy)\n

          All bricks of a disperse set should have the same capacity, otherwise, when the smallest brick becomes full, no additional data will be allowed in the disperse set.

          It's important to note that a configuration with 3 bricks and redundancy 1 will have less usable space (66.7% of the total physical space) than a configuration with 10 bricks and redundancy 1 (90%). However, the first one will be safer than the second one (roughly, the probability of failure of the second configuration is more than 4.5 times bigger than that of the first one).

          For example, a dispersed volume composed of 6 bricks of 4TB and a redundancy of 2 will be completely operational even with two bricks inaccessible. However a third inaccessible brick will bring the volume down because it won't be possible to read or write to it. The usable space of the volume will be equal to 16TB.
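
          Applying the usable-space formula above to this example:

            <Usable size> = 4TB * (6 - 2) = 16TB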

          The implementation of erasure codes in GlusterFS limits the redundancy to a value smaller than #Bricks / 2 (or equivalently, redundancy * 2 < #Bricks). Having a redundancy equal to half of the number of bricks would be almost equivalent to a replica-2 volume, and probably a replicated volume will perform better in this case.

          Optimal volumes

          One of the worst things erasure codes have in terms of performance is the RMW (Read-Modify-Write) cycle. Erasure codes operate on blocks of a certain size and cannot work with smaller ones. This means that if a user issues a write of a portion of a file that doesn't fill a full block, the volume needs to read the remaining portion from the current contents of the file, merge them, compute the updated encoded block and, finally, write the resulting data.

          This adds latency, reducing performance when this happens. Some GlusterFS performance xlators can help to reduce or even eliminate this problem for some workloads, but it should be taken into account when using dispersed volumes for a specific use case.

          The current implementation of dispersed volumes uses blocks of a size that depends on the number of bricks and redundancy: 512 * (#Bricks - redundancy) bytes. This value is also known as the stripe size.

          Using combinations of #Bricks/redundancy that give a power of two for the stripe size will make the disperse volume perform better in most workloads, because it's more typical to write information in blocks whose size is a power of two (for example databases, virtual machines and many applications).

          These combinations are considered optimal.

          For example, a configuration with 6 bricks and redundancy 2 will have a stripe size of 512 * (6 - 2) = 2048 bytes, so it's considered optimal. A configuration with 7 bricks and redundancy 2 would have a stripe size of 2560 bytes, needing a RMW cycle for many writes (of course this always depends on the use case).

          To create a dispersed volume

          1. Create a trusted storage pool.

          2. Create the dispersed volume:

            # gluster volume create <NEW-VOLNAME> [disperse [<count>]] [redundancy <count>] [transport tcp] <NEW-BRICK>...

            A dispersed volume can be created by specifying the number of bricks in a disperse set, by specifying the number of redundancy bricks, or both.

            If disperse is not specified, or the <count> is missing, the entire volume will be treated as a single disperse set composed of all bricks enumerated in the command line.

            If redundancy is not specified, it is computed automatically to be the optimal value. If this value does not exist, it's assumed to be '1' and a warning message is shown:

            # gluster volume create test-volume disperse 4 server{1..4}:/bricks/test-volume\nThere isn't an optimal redundancy value for this configuration. Do you want to create the volume with redundancy 1 ? (y/n)\n

            In all cases where redundancy is automatically computed and it's not equal to '1', a warning message is displayed:

            # gluster volume create test-volume disperse 6 server{1..6}:/bricks/test-volume\nThe optimal redundancy for this configuration is 2. Do you want to create the volume with this value ? (y/n)\n

            redundancy must be greater than 0, and the total number of bricks must be greater than 2 * redundancy. This means that a dispersed volume must have a minimum of 3 bricks.
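
            For example, a minimal dispersed volume with the smallest allowed brick count (a hedged sketch using hypothetical server names; with 3 bricks the only valid redundancy is 1):

            # gluster volume create test-volume disperse 3 redundancy 1 server{1..3}:/bricks/test-volume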

            If the transport type is not specified, tcp is used as the default. You can also set additional options if required, like in the other volume types.

            Note:

            • Make sure you start your volumes before you try to mount them or else client operations after the mount will hang.

            • GlusterFS will fail, with a warning, to create a dispersed volume if more than one brick of a disperse set is present on the same peer:

              # gluster volume create <volname> disperse 3 server1:/brick{1..3}\nvolume create: <volname>: failed: Multiple bricks of a disperse volume are present on the same server. This setup is not optimal. Bricks should be on different nodes to have best fault tolerant configuration. Use 'force' at the end of the command if you want to override this behavior.\n
          "},{"location":"Administrator-Guide/Setting-Up-Volumes/#creating-distributed-dispersed-volumes","title":"Creating Distributed Dispersed Volumes","text":"

          Distributed dispersed volumes are the equivalent of distributed replicated volumes, but use dispersed subvolumes instead of replicated ones.

          To create a distributed dispersed volume

          1. Create a trusted storage pool.

          2. Create the distributed dispersed volume:

            # gluster volume create <NEW-VOLNAME> disperse <count> [redundancy <count>] [transport tcp] <NEW-BRICK>...

            To create a distributed dispersed volume, the disperse keyword and <count> are mandatory, and the number of bricks specified in the command line must be a multiple of the disperse count.

            redundancy is exactly the same as in the dispersed volume.

            If the transport type is not specified, tcp is used as the default. You can also set additional options if required, like in the other volume types.

            Note:

            • Make sure you start your volumes before you try to mount them or else client operations after the mount will hang.

            • For distributed dispersed volumes, bricks can be hosted on the same node if they belong to different subvolumes:

              # gluster volume create <volname> disperse 3 server1:/br1 server2:/br1 server3:/br1 server1:/br2 server2:/br2 server3:/br2\n

              volume create: <volname>: success: please start the volume to access data"},{"location":"Administrator-Guide/Setting-Up-Volumes/#starting-volumes","title":"Starting Volumes","text":"

              You must start your volumes before you try to mount them.

              To start a volume

              • Start a volume:

                # gluster volume start <VOLNAME> [force]

                For example, to start test-volume:

                # gluster volume start test-volume\nStarting test-volume has been successful\n
              "},{"location":"Administrator-Guide/Split-brain-and-ways-to-deal-with-it/","title":"Split brain and the ways to deal with it","text":""},{"location":"Administrator-Guide/Split-brain-and-ways-to-deal-with-it/#split-brain","title":"Split brain:","text":"

              Split brain is a situation where two or more replicated copies of a file become divergent. When a file is in split brain, there is an inconsistency in either the data or the metadata of the file amongst the bricks of a replica, and there is not enough information to authoritatively pick a copy as being pristine and heal the bad copies, despite all bricks being up and online. For a directory, there is also an entry split brain, where a file inside it can have a different gfid/file-type across the bricks of a replica. Split brain can happen mainly for two reasons:

              • Due to a network disconnect, where a client temporarily loses connection to the bricks:
              1. There is a replica pair of 2 bricks, brick1 on server1 and brick2 on server2.

              2. Client1 loses connection to brick2 and client2 loses connection to brick1 due to a network split.

              3. Writes from client1 go to brick1 and writes from client2 go to brick2, which is nothing but split-brain.

              • Gluster brick processes going down or returning an error:
              1. Server1 is down and server2 is up: Writes happen on server2.

              2. Server1 comes up, server2 goes down (heal has not happened; data on server2 is not replicated to server1): Writes happen on server1.

              3. Server2 comes up: Both server1 and server2 have data independent of each other.

              If we use a replica 2 volume, it is not possible to prevent split-brain without losing availability.

              "},{"location":"Administrator-Guide/Split-brain-and-ways-to-deal-with-it/#ways-to-deal-with-split-brain","title":"Ways to deal with split brain:","text":"

              In glusterfs there are ways to resolve split brain. You can see the detailed description of how to resolve a split-brain here. Moreover, there are ways to reduce the chances of ending up in split-brain situations. They are:

              1. Replica 3 volume
              2. Arbiter volume

              Both of these use the client-quorum option of glusterfs to avoid the split-brain situations.
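
              Before attempting any resolution, you can list the files that are currently in split-brain on a replicated volume (a hedged example; <VOLNAME> is a placeholder):

              # gluster volume heal <VOLNAME> info split-brain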

              "},{"location":"Administrator-Guide/Split-brain-and-ways-to-deal-with-it/#client-quorum","title":"Client quorum:","text":"

              This is a feature implemented in the Automatic File Replication (AFR from here on) module, to prevent split-brain in the I/O path for replicate/distributed-replicate volumes. By default, if the client-quorum is not met for a particular replica subvol, it becomes read-only. The other subvols (in a dist-rep volume) will still have R/W access. Here you can see more details about client-quorum.

              "},{"location":"Administrator-Guide/Split-brain-and-ways-to-deal-with-it/#client-quorum-in-replica-2-volumes","title":"Client quorum in replica 2 volumes:","text":"

              In a replica 2 volume it is not possible to achieve high availability and consistency at the same time without sacrificing tolerance to partition. If we set the client-quorum option to auto, then the first brick must always be up, irrespective of the status of the second brick. If only the second brick is up, the subvolume becomes read-only. If the quorum-type is set to fixed and the quorum-count is set to 1, then we may end up in split brain:

              • Brick1 is up and brick2 is down. Quorum is met and the write happens on brick1.
              • Brick1 goes down and brick2 comes up (no heal has happened). Quorum is met, the write happens on brick2.
              • Brick1 comes up. Quorum is met, but both bricks have independent writes - split-brain.

              To avoid this we have to set the quorum-count to 2, which costs availability: even if we have one replica brick up and running, the quorum is not met and we end up seeing EROFS.
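
              For example, a hedged sketch of the two client-quorum settings discussed above (cluster.quorum-type and cluster.quorum-count are the documented options; <VOLNAME> is a placeholder):

              # gluster volume set <VOLNAME> cluster.quorum-type fixed
              # gluster volume set <VOLNAME> cluster.quorum-count 2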

              "},{"location":"Administrator-Guide/Split-brain-and-ways-to-deal-with-it/#1-replica-3-volume","title":"1. Replica 3 volume:","text":"

              When we create a replicated or distributed replicated volume with replica count 3, the cluster.quorum-type option is set to auto by default. That means at least 2 bricks should be up and running to satisfy the quorum and allow the writes. This is the recommended setting for a replica 3 volume and this should not be changed. Here is how it prevents files from ending up in split brain:

              B1, B2, and B3 are the 3 bricks of a replica 3 volume.

              1. B1 & B2 are up and B3 is down. Quorum is met and write happens on B1 & B2.
              2. B3 comes up and B2 goes down. Quorum is met and write happens on B1 & B3.
              3. B2 comes up and B1 goes down. Quorum is met. But when a write request comes, AFR sees that B2 & B3 are blaming each other (B2 says that some writes are pending on B3 and B3 says that some writes are pending on B2), therefore the write is not allowed and is failed with EIO.

              Command to create a replica 3 volume:

              gluster volume create <volname> replica 3 host1:brick1 host2:brick2 host3:brick3\n
              "},{"location":"Administrator-Guide/Split-brain-and-ways-to-deal-with-it/#2-arbiter-volume","title":"2. Arbiter volume:","text":"

              Arbiter offers the sweet spot between replica 2 and replica 3, where the user wants the split-brain protection offered by replica 3 but does not want to invest in 3x storage space. Arbiter is also a replica 3 volume where the third brick of the replica is automatically configured as an arbiter node. This means that the third brick stores only the file names and metadata, but not any data. This helps in avoiding split brain while providing the same level of consistency as a normal replica 3 volume.

              Command to create an arbiter volume:

              gluster volume create <volname> replica 3 arbiter 1 host1:brick1 host2:brick2 host3:brick3\n

              The only difference in the command is the additional keyword arbiter 1 after the replica count. Since it is also a replica 3 volume, the cluster.quorum-type option is set to auto by default and at least 2 bricks should be up to satisfy the quorum and allow writes. Since the arbiter brick has only the names and metadata of the files, there are some additional checks to guarantee consistency. Arbiter works as follows:

              1. Clients take full file locks while writing (replica 3 takes range locks).
              2. If 2 bricks are up and if one of them is the arbiter, and it blames the other up brick, then all FOPs will fail with ENOTCONN (Transport endpoint is not connected). If the arbiter doesn't blame the other brick, FOPs will be allowed to proceed.
              3. If 2 bricks are up and the arbiter is down, then FOPs will be allowed.
              4. If only one brick is up, then client-quorum is not met and the volume becomes read-only (EROFS).
              5. In all cases, if there is only one source before the FOP is initiated and if the FOP fails on that source, the application will receive ENOTCONN.

              You can find more details on arbiter here.

              "},{"location":"Administrator-Guide/Split-brain-and-ways-to-deal-with-it/#differences-between-replica-3-and-arbiter-volumes","title":"Differences between replica 3 and arbiter volumes:","text":"
              1. In the case of a replica 3 volume, the entire file is stored on all the bricks and it is recommended to have bricks of the same size. In the case of arbiter, since the arbiter brick does not store file data, its size can be considerably smaller than that of the other bricks.
              2. An arbiter volume is mid-way between a replica 2 and a replica 3 volume. If only the arbiter and one other brick are up, and the arbiter brick blames the other brick, then the FOPs cannot proceed.
              3. Replica 3 gives higher availability than arbiter because, unlike arbiter, replica 3 has a full copy of the data on all 3 bricks.
              "},{"location":"Administrator-Guide/Start-Stop-Daemon/","title":"Managing the glusterd Service","text":"

              After installing GlusterFS, you must start the glusterd service. The glusterd service serves as the Gluster elastic volume manager, overseeing glusterfs processes and coordinating dynamic volume operations, such as adding and removing volumes across multiple storage servers non-disruptively.

              This section describes how to start the glusterd service in the following ways:

              • Starting and stopping glusterd manually on distributions using systemd
              • Starting glusterd automatically on distributions using systemd
              • Starting and stopping glusterd manually
              • Starting glusterd Automatically

              Note: You must start glusterd on all GlusterFS servers.

              "},{"location":"Administrator-Guide/Start-Stop-Daemon/#distributions-with-systemd","title":"Distributions with systemd","text":""},{"location":"Administrator-Guide/Start-Stop-Daemon/#starting-and-stopping-glusterd-manually","title":"Starting and stopping glusterd manually","text":"
              • To start glusterd manually:

                systemctl start glusterd\n
              • To stop glusterd manually:

                systemctl stop glusterd\n

              "},{"location":"Administrator-Guide/Start-Stop-Daemon/#starting-glusterd-automatically","title":"Starting glusterd automatically","text":"
              • To enable the glusterd service and start it if stopped:

                systemctl enable --now glusterd\n
              • To disable the glusterd service and stop it if started:

                systemctl disable --now glusterd\n
              "},{"location":"Administrator-Guide/Start-Stop-Daemon/#distributions-without-systemd","title":"Distributions without systemd","text":""},{"location":"Administrator-Guide/Start-Stop-Daemon/#starting-and-stopping-glusterd-manually_1","title":"Starting and stopping glusterd manually","text":"

              This section describes how to start and stop glusterd manually

              • To start glusterd manually, enter the following command:

                /etc/init.d/glusterd start\n
              • To stop glusterd manually, enter the following command:

                /etc/init.d/glusterd stop\n

              "},{"location":"Administrator-Guide/Start-Stop-Daemon/#starting-glusterd-automatically_1","title":"Starting glusterd Automatically","text":"

              This section describes how to configure the system to automatically start the glusterd service every time the system boots.

              "},{"location":"Administrator-Guide/Start-Stop-Daemon/#red-hat-and-fedora-distributions","title":"Red Hat and Fedora distributions","text":"

              To configure Red Hat-based systems to automatically start the glusterd service every time the system boots, enter the following from the command line:

              chkconfig glusterd on\n
              "},{"location":"Administrator-Guide/Start-Stop-Daemon/#debian-and-derivatives-like-ubuntu","title":"Debian and derivatives like Ubuntu","text":"

              To configure Debian-based systems to automatically start the glusterd service every time the system boots, enter the following from the command line:

              update-rc.d glusterd defaults\n
              "},{"location":"Administrator-Guide/Start-Stop-Daemon/#systems-other-than-red-hat-and-debian","title":"Systems Other than Red Hat and Debian","text":"

              To configure systems other than Red Hat or Debian to automatically start the glusterd service every time the system boots, add the following entry to the /etc/rc.local file:

              echo \"glusterd\" >> /etc/rc.local\n
              "},{"location":"Administrator-Guide/Storage-Pools/","title":"Managing Trusted Storage Pools","text":""},{"location":"Administrator-Guide/Storage-Pools/#overview","title":"Overview","text":"

              A trusted storage pool (TSP) is a trusted network of storage servers. Before you can configure a GlusterFS volume, you must create a trusted storage pool of the storage servers that will provide bricks to the volume by peer probing the servers. The servers in a TSP are peers of each other.

              After installing Gluster on your servers and before creating a trusted storage pool, each server belongs to a storage pool consisting of only that server.

              • Managing Trusted Storage Pools
                • Overview
                • Adding Servers
                • Listing Servers
                • Viewing Peer Status
                • Removing Servers

              Before you start:

              • The servers used to create the storage pool must be resolvable by hostname.

              • The glusterd daemon must be running on all storage servers that you want to add to the storage pool. See Managing the glusterd Service for details.

              • The firewall on the servers must be configured to allow access to port 24007.
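
              As an illustration only (assuming a firewalld-based distribution; adapt this to whatever firewall your systems actually use), port 24007 could be opened with:

                firewall-cmd --permanent --add-port=24007/tcp
                firewall-cmd --reload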

              The following commands were run on a TSP consisting of 3 servers - server1, server2, and server3.

              "},{"location":"Administrator-Guide/Storage-Pools/#adding-servers","title":"Adding Servers","text":"

              To add a server to a TSP, peer probe it from a server already in the pool.

                  # gluster peer probe <server>\n

              For example, to add a new server4 to the cluster described above, probe it from one of the other servers:

                  server1#  gluster peer probe server4\n    Probe successful\n

              Verify the peer status from the first server (server1):

                  server1# gluster peer status\n    Number of Peers: 3\n\n    Hostname: server2\n    Uuid: 5e987bda-16dd-43c2-835b-08b7d55e94e5\n    State: Peer in Cluster (Connected)\n\n    Hostname: server3\n    Uuid: 1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7\n    State: Peer in Cluster (Connected)\n\n    Hostname: server4\n    Uuid: 3e0cabaa-9df7-4f66-8e5d-cbc348f29ff7\n    State: Peer in Cluster (Connected)\n

              "},{"location":"Administrator-Guide/Storage-Pools/#listing-servers","title":"Listing Servers","text":"

              To list all nodes in the TSP:

                  server1# gluster pool list\n    UUID                                    Hostname        State\n    d18d36c5-533a-4541-ac92-c471241d5418    localhost       Connected\n    5e987bda-16dd-43c2-835b-08b7d55e94e5    server2         Connected\n    1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7    server3         Connected\n    3e0cabaa-9df7-4f66-8e5d-cbc348f29ff7    server4         Connected\n

              "},{"location":"Administrator-Guide/Storage-Pools/#viewing-peer-status","title":"Viewing Peer Status","text":"

              To view the status of the peers in the TSP:

                  server1# gluster peer status\n    Number of Peers: 3\n\n    Hostname: server2\n    Uuid: 5e987bda-16dd-43c2-835b-08b7d55e94e5\n    State: Peer in Cluster (Connected)\n\n    Hostname: server3\n    Uuid: 1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7\n    State: Peer in Cluster (Connected)\n\n    Hostname: server4\n    Uuid: 3e0cabaa-9df7-4f66-8e5d-cbc348f29ff7\n    State: Peer in Cluster (Connected)\n

              "},{"location":"Administrator-Guide/Storage-Pools/#removing-servers","title":"Removing Servers","text":"

              To remove a server from the TSP, run the following command from another server in the pool:

                  # gluster peer detach <server>\n

              For example, to remove server4 from the trusted storage pool:

                  server1# gluster peer detach server4\n    Detach successful\n

              Verify the peer status:

                  server1# gluster peer status\n    Number of Peers: 2\n\n    Hostname: server2\n    Uuid: 5e987bda-16dd-43c2-835b-08b7d55e94e5\n    State: Peer in Cluster (Connected)\n\n    Hostname: server3\n    Uuid: 1e0ca3aa-9ef7-4f66-8f15-cbc348f29ff7\n    State: Peer in Cluster (Connected)\n
              "},{"location":"Administrator-Guide/Thin-Arbiter-Volumes/","title":"Thin Arbiter volumes in gluster","text":"

              Thin Arbiter is a new type of quorum node where the granularity of what is good and what is bad data is coarser than with the traditional arbiter brick. In this type of volume, quorum is taken into account at a brick level rather than on a per-file basis. If there is even one file that is marked bad (i.e. needs healing) on a data brick, that brick is considered bad for all files as a whole. So, even for a different file, if the write fails on the other data brick but succeeds on this 'bad' brick, the write is returned to the application as a failure.

              • Thin Arbiter volumes in gluster
              • Why Thin Arbiter?
              • Setting UP Thin Arbiter Volume
              • How Thin Arbiter works
              "},{"location":"Administrator-Guide/Thin-Arbiter-Volumes/#why-thin-arbiter","title":"Why Thin Arbiter?","text":"

              This is a solution for handling stretch-cluster kinds of workloads, but it can be used for regular workloads as well, in case users are satisfied with this kind of quorum in comparison to arbiter/3-way replication. The thin arbiter node can be placed outside of the trusted storage pool, i.e., the thin arbiter is the \"stretched\" node in the cluster. This node can be placed in the cloud or anywhere, even if that connection has high latency. As this node takes part only in case of a failure (or when a brick is down), and only to decide the quorum, it does not impact performance in normal cases. The cost of performing any file operation is lower than with arbiter when everything is fine. I/O goes only to the data bricks, and goes to the thin arbiter only in the case of the first failure, until the heal completes.

              "},{"location":"Administrator-Guide/Thin-Arbiter-Volumes/#setting-up-thin-arbiter-volume","title":"Setting UP Thin Arbiter Volume","text":"

              The command to run the thin-arbiter process on the node:

              /usr/local/sbin/glusterfsd -N --volfile-id ta-vol -f /var/lib/glusterd/vols/thin-arbiter.vol --brick-port 24007 --xlator-option ta-vol-server.transport.socket.listen-port=24007\n

              Creating a thin arbiter replica 2 volume:

              glustercli volume create <volname> --replica 2 <host1>:<brick1> <host2>:<brick2> --thin-arbiter <quorum-host>:<path-to-store-replica-id-file>\n

              For example:

              glustercli volume create testvol --replica 2 server{1..2}:/bricks/brick-{1..2} --thin-arbiter server-3:/bricks/brick_ta --force\nvolume create: testvol: success: please start the volume to access data\n
              "},{"location":"Administrator-Guide/Thin-Arbiter-Volumes/#how-thin-arbiter-works","title":"How Thin Arbiter works","text":"

              There will be only one process running on the thin arbiter node, which is used to update the replica id file for all replica pairs across all volumes. The replica id file contains the information about the good and bad data bricks in the form of xattrs. Each replica pair uses its respective replica-id file, which is created during mount.

              1. Read transactions: Reads are allowed when quorum is met, i.e.:

                 • When all data bricks and the thin arbiter are up: A lookup is performed on the data bricks to figure out the good/bad bricks, and content is served from the good brick.

                 • When only one brick is up: The FOP is failed with EIO.

                 • When two bricks are up: If the two data bricks are up, a lookup is done on the data bricks to figure out the good/bad bricks, and content is served from the good brick. One lookup is enough to figure out the good/bad copy of that file, and this is kept in the inode context. If one data brick and the thin arbiter brick are up, an xattrop is done on the thin arbiter to get the information about the source (good) brick. If the data brick which is up has also been marked as the source brick on the thin arbiter, a lookup on this file is done on the data brick to check whether the file is really healthy or not. If the file is good, data is served from this brick, else an EIO error is returned to the user.

              2. Write transactions: The thin arbiter doesn't participate in I/O; the transaction will choose to wind operations on the thin-arbiter brick to make sure the necessary metadata is kept up-to-date in case of failures. An operation failure leads to updating the replica-id file on the thin arbiter with source/sink information in the xattrs, just as it happens in AFR.

              "},{"location":"Administrator-Guide/Trash/","title":"Trash Translator","text":"

              Trash translator will allow users to access deleted or truncated files. Every brick will maintain a hidden .trashcan directory, which will be used to store the files deleted or truncated from the respective brick. The aggregate of all those .trashcan directories can be accessed from the mount point. To avoid name collisions, a timestamp is appended to the original file name while it is being moved to the trash directory.

              "},{"location":"Administrator-Guide/Trash/#implications-and-usage","title":"Implications and Usage","text":"

              Apart from the primary use-case of accessing files deleted or truncated by the user, the trash translator can be helpful for internal operations such as self-heal and rebalance. During self-heal and rebalance it is possible to lose crucial data. In those circumstances, the trash translator can assist in the recovery of the lost data. The trash translator is designed to intercept unlink, truncate and ftruncate fops, store a copy of the current file in the trash directory, and then perform the fop on the original file. For the internal operations, the files are stored under the 'internal_op' folder inside the trash directory.

              "},{"location":"Administrator-Guide/Trash/#volume-options","title":"Volume Options","text":"
              • gluster volume set <VOLNAME> features.trash <on/off>

              This command can be used to enable the trash translator in a volume. If set to on, a trash directory will be created in every brick inside the volume during the volume start command. By default, the translator is loaded during volume start but remains non-functional. Disabling trash with the help of this option will not remove the trash directory or even its contents from the volume.

              • gluster volume set <VOLNAME> features.trash-dir <name>

              This command is used to reconfigure the trash directory to a user-specified name. The argument is a valid directory name. The directory will be created inside every brick under this name. If not specified by the user, the trash translator will create the trash directory with the default name \u201c.trashcan\u201d. This can be used only when the trash-translator is on.

              • gluster volume set <VOLNAME> features.trash-max-filesize <size>

              This command can be used to filter files entering the trash directory based on their size. Files above trash-max-filesize are deleted/truncated directly. The value for size may be followed by the multiplicative suffixes KB (=1024 bytes), MB (=1024*1024 bytes), and GB (=1024*1024*1024 bytes). The default size is set to 5MB.

              • gluster volume set <VOLNAME> features.trash-eliminate-path <path1> [ , <path2> , . . . ]

              This command can be used to set the eliminate pattern for the trash translator. Files residing under this pattern will not be moved to the trash directory during deletion/truncation. The path must be a valid one present in the volume.

              • gluster volume set <VOLNAME> features.trash-internal-op <on/off>

              This command can be used to enable trash for internal operations like self-heal and rebalance. It is set to off by default.
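
              Putting a few of these options together (a hedged sketch on a hypothetical volume named test; the option names and value formats are the ones documented above):

              # gluster volume set test features.trash on
              # gluster volume set test features.trash-max-filesize 1GB
              # gluster volume set test features.trash-internal-op on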

              "},{"location":"Administrator-Guide/Trash/#sample-usage","title":"Sample usage","text":"

              The following steps illustrate a simple scenario of deleting a file from a directory:

              1. Create a simple distributed volume and start it.

                gluster volume create test rhs:/home/brick\ngluster volume start test\n
              2. Enable trash translator

                gluster volume set test features.trash on\n
              3. Mount the glusterfs volume via the native client as follows.

                mount -t glusterfs  rhs:test /mnt\n
              4. Create a directory and file in the mount.

                 mkdir /mnt/dir\necho abc > /mnt/dir/file\n
              5. Delete the file from the mount.

                 rm /mnt/dir/file -rf\n
              6. Check inside the trash directory.

                 ls /mnt/.trashcan\n

              We can find the deleted file inside the trash directory with a timestamp appended to its filename.

              For example,

              mount -t glusterfs rh-host:/test /mnt/test\nmkdir /mnt/test/abc\ntouch /mnt/test/abc/file\nrm -f /mnt/test/abc/file\n\nls /mnt/test/abc\n\nls /mnt/test/.trashcan/abc/\n

              You will see file2014-08-21_123400 as the output of the last ls command.

              "},{"location":"Administrator-Guide/Trash/#points-to-be-remembered","title":"Points to be remembered","text":"
              • As soon as the volume is started, the trash directory will be created inside the volume and will be visible through the mount. Disabling the trash will not have any impact on its visibility from the mount.
              • Even though deletion of the trash directory itself is not permitted, issuing a delete on it removes its current contents, leaving only an empty trash directory.
              "},{"location":"Administrator-Guide/Trash/#known-issue","title":"Known issue","text":"

              Since the trash translator resides on the server side, higher translators like AFR and DHT are unaware of the rename and truncate operations performed by this translator when it moves files to the trash directory. Until a complete-path-based lookup is issued on the trashed files, they may not be visible from the mount.

              "},{"location":"Administrator-Guide/Tuning-Volume-Options/","title":"Tuning Volume Options","text":"

              You can tune volume options, as needed, while the cluster is online and available.

              Note

              It is recommended to set the server.allow-insecure option to ON if there are too many bricks in each volume, or if there are too many services which have already utilized all the privileged ports in the system. Turning this option ON allows the server to accept connections from insecure (unprivileged) ports. Use this option only if your deployment requires it.
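
              For example, to turn it on for a single volume (a hedged illustration; <VOLNAME> is a placeholder):

              # gluster volume set <VOLNAME> server.allow-insecure on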

              Tune volume options using the following command:

              # gluster volume set <VOLNAME> <OPT-NAME> <OPT-VALUE>

              For example, to specify the performance cache size for test-volume:

              # gluster volume set test-volume performance.cache-size 256MB\nSet volume successful\n

              You can view the changed volume options using the following command:

              # gluster volume info
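
              To check a single option on a specific volume, recent GlusterFS releases also provide the gluster volume get command (a hedged example, reusing the volume and option names from above):

              # gluster volume get test-volume performance.cache-size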

              The following table lists the volume options along with their description and default value:

              Note

              The default options given here are subject to modification at any given time and may not be the same for all versions.

              Type Option Description Default Value Available Options auth.allow IP addresses of the clients which should be allowed to access the volume. * (allow all) Valid IP address which includes wild card patterns including *, such as 192.168.1.* auth.reject IP addresses of the clients which should be denied to access the volume. NONE (reject none) Valid IP address which includes wild card patterns including *, such as 192.168.2.* Cluster cluster.self-heal-window-size Specifies the maximum number of blocks per file on which self-heal would happen simultaneously. 1 0 - 1024 blocks cluster.data-self-heal-algorithm Specifies the type of self-heal. If you set the option as \"full\", the entire file is copied from source to destinations. If the option is set to \"diff\" the file blocks that are not in sync are copied to destinations. Reset uses a heuristic model. If the file does not exist on one of the subvolumes, or a zero-byte file exists (created by entry self-heal) the entire content has to be copied anyway, so there is no benefit from using the \"diff\" algorithm. If the file size is about the same as page size, the entire file can be read and written with a few operations, which will be faster than \"diff\" which has to read checksums and then read and write. reset full/diff/reset cluster.min-free-disk Specifies the percentage of disk space that must be kept free. Might be useful for non-uniform bricks 10% Percentage of required minimum free disk space cluster.min-free-inodes Specifies when system has only N% of inodes remaining, warnings starts to appear in log files 10% Percentage of required minimum free inodes cluster.stripe-block-size Specifies the size of the stripe unit that will be read from or written to. 128 KB (for all files) size in bytes cluster.self-heal-daemon Allows you to turn-off proactive self-heal on replicated On On/Off cluster.ensure-durability This option makes sure the data/metadata is durable across abrupt shutdown of the brick. On On/Off cluster.lookup-unhashed This option does a lookup through all the sub-volumes, in case a lookup didn\u2019t return any result from the hashed subvolume. If set to OFF, it does not do a lookup on the remaining subvolumes. on auto, yes/no, enable/disable, 1/0, on/off cluster.lookup-optimize This option enables the optimization of -ve lookups, by not doing a lookup on non-hashed subvolumes for files, in case the hashed subvolume does not return any result. This option disregards the lookup-unhashed setting, when enabled. on on/off cluster.randomize-hash-range-by-gfid Allows to use gfid of directory to determine the subvolume from which hash ranges are allocated starting with 0. Note that we still use a directory/file\u2019s name to determine the subvolume to which it hashes off on/off cluster.rebal-throttle Sets the maximum number of parallel file migrations allowed on a node during the rebalance operation. The default value is normal and allows 2 files to be migrated at a time. Lazy will allow only one file to be migrated at a time and aggressive will allow maxof[(((processing units) - 4) / 2), 4] normal lazy/normal/aggressive cluster.background-self-heal-count Specifies the number of per client self-heal jobs that can perform parallel heals in the background. 
8 0-256 cluster.heal-timeout Time interval for checking the need to self-heal in self-heal-daemon 600 5-(signed-int) cluster.eager-lock If eager-lock is off, locks release immediately after file operations complete, improving performance for some operations, but reducing access efficiency on on/off cluster.quorum-type If value is \u201cfixed\u201d only allow writes if quorum-count bricks are present. If value is \u201cauto\u201d only allow writes if more than half of bricks, or exactly half including the first brick, are present none none/auto/fixed cluster.quorum-count If quorum-type is \u201cfixed\u201d only allow writes if this many bricks are present. Other quorum types will OVERWRITE this value null 1-(signed-int) cluster.heal-wait-queue-length Specifies the number of heals that can be queued for the parallel background self heal jobs. 128 0-10000 cluster.favorite-child-policy Specifies which policy can be used to automatically resolve split-brains without user intervention. \u201csize\u201d picks the file with the biggest size as the source. \u201cctime\u201d and \u201cmtime\u201d pick the file with the latest ctime and mtime respectively as the source. \u201cmajority\u201d picks a file with identical mtime and size in more than half the number of bricks in the replica. none none/size/ctime/mtime/majority cluster.use-anonymous-inode Setting this option heals directory renames efficiently no no/yes Disperse disperse.eager-lock If eager-lock is on, the lock remains in place either until lock contention is detected, or for 1 second in order to check if there is another request for that file from the same client. If eager-lock is off, locks release immediately after file operations complete, improving performance for some operations, but reducing access efficiency. on on/off disperse.other-eager-lock This option is equivalent to the disperse.eager-lock option but applicable only for non regular files. When multiple clients access a particular directory, disabling disperse.other-eager-lockoption for the volume can improve performance for directory access without compromising performance of I/O's for regular files. off on/off disperse.shd-max-threads Specifies the number of entries that can be self healed in parallel on each disperse subvolume by self-heal daemon. 1 1 - 64 disperse.shd-wait-qlength Specifies the number of entries that must be kept in the dispersed subvolume's queue for self-heal daemon threads to take up as soon as any of the threads are free to heal. This value should be changed based on how much memory self-heal daemon process can use for keeping the next set of entries that need to be healed. 1024 1 - 655536 disprse.eager-lock-timeout Maximum time (in seconds) that a lock on an inode is kept held if no new operations on the inode are received. 1 1-60 disperse.other-eager-lock-timeout It\u2019s equivalent to eager-lock-timeout option but for non regular files. 1 1-60 disperse.background-heals This option can be used to control number of parallel heals running in background. 8 0-256 disperse.heal-wait-qlength This option can be used to control number of heals that can wait 128 0-65536 disperse.read-policy inode-read fops happen only on \u2018k\u2019 number of bricks in n=k+m disperse subvolume. \u2018round-robin\u2019 selects the read subvolume using round-robin algo. \u2018gfid-hash\u2019 selects read subvolume based on hash of the gfid of that file/directory. 
gfid-hash round-robin/gfid-hash disperse.self-heal-window-size Maximum number blocks(128KB) per file for which self-heal process would be applied simultaneously. 1 1-1024 disperse.optimistic-change-log This option Set/Unset dirty flag for every update fop at the start of the fop. If OFF, this option impacts performance of entry or metadata operations as it will set dirty flag at the start and unset it at the end of ALL update fop. If ON and all the bricks are good, dirty flag will be set at the start only for file fops, For metadata and entry fops dirty flag will not be set at the start This does not impact performance for metadata operations and entry operation but has a very small window to miss marking entry as dirty in case it is required to be healed. on on/off disperse.parallel-writes This controls if writes can be wound in parallel as long as it doesn\u2019t modify same stripes on on/off disperse.stripe-cache This option will keep the last stripe of write fop in memory. If next write falls in this stripe, we need not to read it again from backend and we can save READ fop going over the network. This will improve performance, specially for sequential writes. However, this will also lead to extra memory consumption, maximum (cache size * stripe size) Bytes per open file 4 0-10 disperse.quorum-count This option can be used to define how many successes on the bricks constitute a success to the application. This count should be in the range [disperse-data-count, disperse-count] (inclusive) 0 0-(signedint) disperse.use-anonymous-inode Setting this option heals renames efficiently off on/off Logging diagnostics.brick-log-level Changes the log-level of the bricks INFO DEBUG/WARNING/ERROR/CRITICAL/NONE/TRACE diagnostics.client-log-level Changes the log-level of the clients. INFO DEBUG/WARNING/ERROR/CRITICAL/NONE/TRACE diagnostics.brick-sys-log-level Depending on the value defined for this option, log messages at and above the defined level are generated in the syslog and the brick log files. CRITICAL INFO/WARNING/ERROR/CRITICAL diagnostics.client-sys-log-level Depending on the value defined for this option, log messages at and above the defined level are generated in the syslog and the client log files. CRITICAL INFO/WARNING/ERROR/CRITICAL diagnostics.brick-log-format Allows you to configure the log format to log either with a message id or without one on the brick. with-msg-id no-msg-id/with-msg-id diagnostics.client-log-format Allows you to configure the log format to log either with a message ID or without one on the client. with-msg-id no-msg-id/with-msg-id diagnostics.brick-log-buf-size The maximum number of unique log messages that can be suppressed until the timeout or buffer overflow, whichever occurs first on the bricks. 5 0 and 20 (0 and 20 included) diagnostics.client-log-buf-size The maximum number of unique log messages that can be suppressed until the timeout or buffer overflow, whichever occurs first on the clients. 5 0 and 20 (0 and 20 included) diagnostics.brick-log-flush-timeout The length of time for which the log messages are buffered, before being flushed to the logging infrastructure (gluster or syslog files) on the bricks. 120 30 - 300 seconds (30 and 300 included) diagnostics.client-log-flush-timeout The length of time for which the log messages are buffered, before being flushed to the logging infrastructure (gluster or syslog files) on the clients. 
120 30 - 300 seconds (30 and 300 included) Performance *features.trash Enable/disable trash translator off on/off *performance.readdir-ahead Enable/disable readdir-ahead translator in the volume off on/off *performance.read-ahead Enable/disable read-ahead translator in the volume off on/off *performance.io-cache Enable/disable io-cache translator in the volume off on/off performance.quick-read To enable/disable quick-read translator in the volume. on off/on performance.md-cache Enables and disables md-cache translator. off off/on performance.open-behind Enables and disables open-behind translator. on off/on performance.nl-cache Enables and disables nl-cache translator. off off/on performance.stat-prefetch Enables and disables stat-prefetch translator. on off/on performance.client-io-threads Enables and disables client-io-thread translator. on off/on performance.write-behind Enables and disables write-behind translator. on off/on performance.write-behind-window-size Size of the per-file write-behind buffer. 1MB Write-behind cache size performance.io-thread-count The number of threads in IO threads translator. 16 1-64 performance.flush-behind If this option is set ON, instructs write-behind translator to perform flush in background, by returning success (or any errors, if any of previous writes were failed) to application even before flush is sent to backend filesystem. On On/Off performance.cache-max-file-size Sets the maximum file size cached by the io-cache translator. Can use the normal size descriptors of KB, MB, GB,TB or PB (for example, 6GB). Maximum size uint64. 2 ^ 64 -1 bytes size in bytes performance.cache-min-file-size Sets the minimum file size cached by the io-cache translator. Values same as \"max\" above 0B size in bytes performance.cache-refresh-timeout The cached data for a file will be retained till 'cache-refresh-timeout' seconds, after which data re-validation is performed. 1s 0-61 performance.cache-size Size of the read cache. 32 MB size in bytes performance.lazy-open This option requires open-behind to be on. Perform an open in the backend only when a necessary FOP arrives (for example, write on the file descriptor, unlink of the file). When this option is disabled, perform backend open immediately after an unwinding open. Yes Yes/No performance.md-cache-timeout The time period in seconds which controls when metadata cache has to be refreshed. If the age of cache is greater than this time-period, it is refreshed. Every time cache is refreshed, its age is reset to 0. 1 0-600 seconds performance.nfs-strict-write-ordering Specifies whether to prevent later writes from overtaking earlier writes for NFS, even if the writes do not relate to the same files or locations. off on/off performance.nfs.flush-behind Specifies whether the write-behind translator performs flush operations in the background for NFS by returning (false) success to the application before flush file operations are sent to the backend file system. on on/off performance.nfs.strict-o-direct Specifies whether to attempt to minimize the cache effects of I/O for a file on NFS. When this option is enabled and a file descriptor is opened using the O_DIRECT flag, write-back caching is disabled for writes that affect that file descriptor. When this option is disabled, O_DIRECT has no effect on caching. This option is ignored if performance.write-behind is disabled. off on/off performance.nfs.write-behind-trickling-writes Enables and disables trickling-write strategy for the write-behind translator for NFS clients. 
on off/on performance.nfs.write-behind-window-size Specifies the size of the write-behind buffer for a single file or inode for NFS. 1 MB 512 KB - 1 GB performance.rda-cache-limit The value specified for this option is the maximum size of cache consumed by the readdir-ahead translator. This value is global and the total memory consumption by readdir-ahead is capped by this value, irrespective of the number/size of directories cached. 10MB 0-1GB performance.rda-request-size The value specified for this option will be the size of buffer holding directory entries in readdirp response. 128KB 4KB-128KB performance.resync-failed-syncs-after-fsync If syncing cached writes that were issued before an fsync operation fails, this option configures whether to reattempt the failed sync operations. off on/off performance.strict-o-direct Specifies whether to attempt to minimize the cache effects of I/O for a file. When this option is enabled and a file descriptor is opened using the O_DIRECT flag, write-back caching is disabled for writes that affect that file descriptor. When this option is disabled, O_DIRECT has no effect on caching. This option is ignored if performance.write-behind is disabled. on on/off performance.strict-write-ordering Specifies whether to prevent later writes from overtaking earlier writes, even if the writes do not relate to the same files or locations. on on/off performance.use-anonymous-fd This option requires open-behind to be on. For read operations, use anonymous file descriptor when the original file descriptor is open-behind and not yet opened in the backend. Yes No/Yes performance.write-behind-trickling-writes Enables and disables trickling-write strategy for the write-behind translator for FUSE clients. on off/on performance.write-behind-window-size Specifies the size of the write-behind buffer for a single file or inode. 1MB 512 KB - 1 GB features.read-only Enables you to mount the entire volume as read-only for all the clients (including NFS clients) accessing it. Off On/Off features.quota-deem-statfs When this option is set to on, it takes the quota limits into consideration while estimating the filesystem size. The limit will be treated as the total size instead of the actual size of filesystem. on on/off features.shard Enables or disables sharding on the volume. Affects files created after volume configuration. disable enable/disable features.shard-block-size Specifies the maximum size of file pieces when sharding is enabled. Affects files created after volume configuration. 64MB 4MB-4TB features.uss This option enable/disable User Serviceable Snapshots on the volume. off on/off geo-replication.indexing Use this option to automatically sync the changes in the filesystem from Primary to Secondary. Off On/Off network.frame-timeout The time frame after which the operation has to be declared as dead, if the server does not respond for a particular operation. 1800 (30 mins) 1800 secs network.ping-timeout The time duration for which the client waits to check if the server is responsive. When a ping timeout happens, there is a network disconnect between the client and server. All resources held by server on behalf of the client get cleaned up. When a reconnection happens, all resources will need to be re-acquired before the client can resume its operations on the server. Additionally, the locks will be acquired and the lock tables updated. This reconnect is a very expensive operation and should be avoided. 
42 Secs 42 Secs nfs nfs.enable-ino32 For 32-bit nfs clients or applications that do not support 64-bit inode numbers or large files, use this option from the CLI to make Gluster NFS return 32-bit inode numbers instead of 64-bit inode numbers. Off On/Off nfs.volume-access Set the access type for the specified sub-volume. read-write read-write/read-only nfs.trusted-write If there is an UNSTABLE write from the client, STABLE flag will be returned to force the client to not send a COMMIT request. In some environments, combined with a replicated GlusterFS setup, this option can improve write performance. This flag allows users to trust Gluster replication logic to sync data to the disks and recover when required. COMMIT requests if received will be handled in a default manner by fsyncing. STABLE writes are still handled in a sync manner. Off On/Off nfs.trusted-sync All writes and COMMIT requests are treated as async. This implies that no write requests are guaranteed to be on server disks when the write reply is received at the NFS client. Trusted sync includes trusted-write behavior. Off On/Off nfs.export-dir This option can be used to export specified comma separated subdirectories in the volume. The path must be an absolute path. Along with path allowed list of IPs/hostname can be associated with each subdirectory. If provided connection will allowed only from these IPs. Format: \\<dir>[(hostspec[hostspec...])][,...]. Where hostspec can be an IP address, hostname or an IP range in CIDR notation. Note: Care must be taken while configuring this option as invalid entries and/or unreachable DNS servers can introduce unwanted delay in all the mount calls. No sub directory exported. Absolute path with allowed list of IP/hostname nfs.export-volumes Enable/Disable exporting entire volumes, instead if used in conjunction with nfs3.export-dir, can allow setting up only subdirectories as exports. On On/Off nfs.rpc-auth-unix Enable/Disable the AUTH_UNIX authentication type. This option is enabled by default for better interoperability. However, you can disable it if required. On On/Off nfs.rpc-auth-null Enable/Disable the AUTH_NULL authentication type. It is not recommended to change the default value for this option. On On/Off nfs.rpc-auth-allow\\<IP- Addresses> Allow a comma separated list of addresses and/or hostnames to connect to the server. By default, all clients are disallowed. This allows you to define a general rule for all exported volumes. Reject All IP address or Host name nfs.rpc-auth-reject\\<IP- Addresses> Reject a comma separated list of addresses and/or hostnames from connecting to the server. By default, all connections are disallowed. This allows you to define a general rule for all exported volumes. Reject All IP address or Host name nfs.ports-insecure Allow client connections from unprivileged ports. By default only privileged ports are allowed. This is a global setting in case insecure ports are to be enabled for all exports using a single option. Off On/Off nfs.addr-namelookup Turn-off name lookup for incoming client connections using this option. In some setups, the name server can take too long to reply to DNS queries resulting in timeouts of mount requests. Use this option to turn off name lookups during address authentication. Note, turning this off will prevent you from using hostnames in rpc-auth.addr.* filters. On On/Off nfs.register-with-portmap For systems that need to run multiple NFS servers, you need to prevent more than one from registering with portmap service. 
Use this option to turn off portmap registration for Gluster NFS. On On/Off nfs.port \\<PORT- NUMBER> Use this option on systems that need Gluster NFS to be associated with a non-default port number. NA 38465-38467 nfs.disable Turn-off volume being exported by NFS Off On/Off Server server.allow-insecure Allow client connections from unprivileged ports. By default only privileged ports are allowed. This is a global setting in case insecure ports are to be enabled for all exports using a single option. On On/Off server.statedump-path Location of the state dump file. tmp directory of the brick New directory path server.allow-insecure Allows FUSE-based client connections from unprivileged ports.By default, this is enabled, meaning that ports can accept and reject messages from insecure ports. When disabled, only privileged ports are allowed. on on/off server.anongid Value of the GID used for the anonymous user when root-squash is enabled. When root-squash is enabled, all the requests received from the root GID (that is 0) are changed to have the GID of the anonymous user. 65534 (this UID is also known as nfsnobody) 0 - 4294967295 server.anonuid Value of the UID used for the anonymous user when root-squash is enabled. When root-squash is enabled, all the requests received from the root UID (that is 0) are changed to have the UID of the anonymous user. 65534 (this UID is also known as nfsnobody) 0 - 4294967295 server.event-threads Specifies the number of event threads to execute in parallel. Larger values would help process responses faster, depending on available processing power. 2 1-1024 server.gid-timeout The time period in seconds which controls when cached groups has to expire. This is the cache that contains the groups (GIDs) where a specified user (UID) belongs to. This option is used only when server.manage-gids is enabled. 2 0-4294967295 seconds server.manage-gids Resolve groups on the server-side. By enabling this option, the groups (GIDs) a user (UID) belongs to gets resolved on the server, instead of using the groups that were send in the RPC Call by the client. This option makes it possible to apply permission checks for users that belong to bigger group lists than the protocol supports (approximately 93). off on/off server.root-squash Prevents root users from having root privileges, and instead assigns them the privileges of nfsnobody. This squashes the power of the root users, preventing unauthorized modification of files on the Red Hat Gluster Storage servers. This option is used only for glusterFS NFS protocol. off on/off server.statedump-path Specifies the directory in which the statedumpfiles must be stored. path to directory /var/run/gluster (for a default installation) Storage storage.health-check-interval Number of seconds between health-checks done on the filesystem that is used for the brick(s). Defaults to 30 seconds, set to 0 to disable. tmp directory of the brick New directory path storage.linux-io_uring Enable/Disable io_uring based I/O at the posix xlator on the bricks. Off On/Off storage.fips-mode-rchecksum If enabled, posix_rchecksum uses the FIPS compliant SHA256 checksum, else it uses MD5. on on/ off storage.create-mask Maximum set (upper limit) of permission for the files that will be created. 0777 0000 - 0777 storage.create-directory-mask Maximum set (upper limit) of permission for the directories that will be created. 0777 0000 - 0777 storage.force-create-mode Minimum set (lower limit) of permission for the files that will be created. 
0000 0000 - 0777 storage.force-create-directory Minimum set (lower limit) of permission for the directories that will be created. 0000 0000 - 0777 storage.health-check-interval Sets the time interval in seconds for a filesystem health check. You can set it to 0 to disable. 30 seconds 0-4294967295 seconds storage.reserve To reserve storage space at the brick. This option accepts size in form of MB and also in form of percentage. If user has configured the storage.reserve option using size in MB earlier, and then wants to give the size in percentage, it can be done using the same option. Also, the newest set value is considered, if it was in MB before and then if it sent in percentage, the percentage value becomes new value and the older one is over-written 1 (1% of the brick size) 0-100

              Note

               We have found that a few performance xlators (the options marked with * in the above table) have been causing more performance regression than improvement. These xlators should be turned off for volumes.
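
               For example, a minimal sketch of turning one of these translators off; the volume name testvol is only an assumption:

               gluster volume set testvol performance.read-ahead off\n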

              "},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/","title":"Arbiter volumes and quorum options in gluster","text":"

              The arbiter volume is a special subset of replica volumes that is aimed at preventing split-brains and providing the same consistency guarantees as a normal replica 3 volume without consuming 3x space.

              • Arbiter volumes and quorum options in gluster
              • Arbiter configuration
              • Arbiter brick(s) sizing
              • Why Arbiter?
              • Split-brains in replica volumes
              • Server-quorum and some pitfalls
              • Client Quorum
              • Replica 2 and Replica 3 volumes
              • How Arbiter works
              "},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/#arbiter-configuration","title":"Arbiter configuration","text":"

              The syntax for creating the volume is:

              gluster volume create <VOLNAME>  replica 2 arbiter 1 <NEW-BRICK> ...\n

               Note: The earlier syntax used to be replica 3 arbiter 1, but that was leading to confusion among users about the total number of data bricks. For the sake of backward compatibility, the old syntax also works. In either case, the implied meaning is that there are 2 data bricks and 1 arbiter brick in an nx(2+1) arbiter volume.

              For example:

              gluster volume create testvol replica 2 arbiter 1  server{1..6}:/bricks/brick\n
              volume create: testvol: success: please start the volume to access data\n

               This means that for every 3 bricks listed, 1 of them is an arbiter. We have created 6 bricks. With a replica count of three, every set of 3 bricks in the series forms a replica subvolume. Since we have two such sets, this creates a distributed volume made up of two replica subvolumes.

              Each replica subvolume is defined to have 1 arbiter out of the 3 bricks. The arbiter bricks are taken from the end of each replica subvolume.

              gluster volume info\n
              Volume Name: testvol\nType: Distributed-Replicate\nVolume ID: ae6c4162-38c2-4368-ae5d-6bad141a4119\nStatus: Created\nNumber of Bricks: 2 x (2 + 1) = 6\nTransport-type: tcp\nBricks:\nBrick1: server1:/bricks/brick\nBrick2: server2:/bricks/brick\nBrick3: server3:/bricks/brick (arbiter)\nBrick4: server4:/bricks/brick\nBrick5: server5:/bricks/brick\nBrick6: server6:/bricks/brick (arbiter)\nOptions Reconfigured  :\ntransport.address-family: inet\nperformance.readdir-ahead: on\n

              The arbiter brick will store only the file/directory names (i.e. the tree structure) and extended attributes (metadata) but not any data. i.e. the file size (as shown by ls -l) will be zero bytes. It will also store other gluster metadata like the .glusterfs folder and its contents.

              Note: Enabling the arbiter feature automatically configures client-quorum to 'auto'. This setting is not to be changed.
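
               To confirm this, one could query the option (the volume name testvol is only an example):

               gluster volume get testvol cluster.quorum-type\n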

              "},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/#arbiter-bricks-sizing","title":"Arbiter brick(s) sizing","text":"

               Since the arbiter brick does not store file data, its disk usage will be considerably smaller than that of the other bricks of the replica. The sizing of the brick will depend on how many files you plan to store in the volume. A good estimate is 4KB times the number of files in the replica. Note that the estimate also depends on the inode space allotted by the underlying filesystem for a given disk size.

               The maxpct value in XFS for volumes of size 1TB to 50TB is only 5%. If you want to store, say, 300 million files, 4KB x 300M gives us 1.2TB. 5% of this is around 60GB. Assuming the recommended inode size of 512 bytes, that gives us the ability to store only 60GB/512 ~= 120 million files. So it is better to choose a higher maxpct value (say 25%) while formatting an XFS disk of size greater than 1TB. Refer to the man page of mkfs.xfs for details.
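
               As an illustrative sketch (the device path /dev/gfs_vg/arbiter_lv is hypothetical), an arbiter brick could be formatted with a higher inode percentage like this:

               mkfs.xfs -f -i size=512,maxpct=25 /dev/gfs_vg/arbiter_lv\n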

              "},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/#why-arbiter","title":"Why Arbiter?","text":""},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/#split-brains-in-replica-volumes","title":"Split-brains in replica volumes","text":"

               When a file is in split-brain, there is an inconsistency in either the data or the metadata (permissions, uid/gid, extended attributes etc.) of the file amongst the bricks of a replica, and we do not have enough information to authoritatively pick a copy as being pristine and heal it to the bad copies, despite all bricks being up and online. For directories, there is also an entry split-brain, where a file inside them has different gfids or file-types (say, one is a file and another is a directory of the same name) across the bricks of a replica.

              This document describes how to resolve files that are in split-brain using gluster cli or the mount point. Almost always, split-brains occur due to network disconnects (where a client temporarily loses connection to the bricks) and very rarely due to the gluster brick processes going down or returning an error.

              "},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/#server-quorum-and-some-pitfalls","title":"Server-quorum and some pitfalls","text":"

              This document provides a detailed description of this feature. The volume options for server-quorum are:

               Option: cluster.server-quorum-ratio. Value Description: 0 to 100.

               Option: cluster.server-quorum-type. Value Description: none | server. If set to server, this option enables the specified volume to participate in the server-side quorum. If set to none, that volume alone is not considered for volume checks.

               The cluster.server-quorum-ratio is a percentage figure and is cluster-wide, i.e. you cannot have different ratios for different volumes in the same trusted pool.
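
               As a sketch (the volume name testvol is an assumption), server-quorum is enabled per volume while the ratio is set cluster-wide:

               gluster volume set testvol cluster.server-quorum-type server\ngluster volume set all cluster.server-quorum-ratio 51%\n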

              For a two-node trusted storage pool, it is important to set this value greater than 50%, so that two nodes separated from each other do not believe they have quorum simultaneously. For a two-node plain replica volume, this would mean both nodes need to be up and running. So there is no notion of HA/failover.

              There are users who create a replica 2 volume from 2 nodes and peer-probe a 'dummy' node without bricks and enable server quorum with a ratio of 51%. This does not prevent files from getting into split-brain. For example, if B1 and B2 are the bricks/nodes of the replica and B3 is the dummy node, we can still end up in split-brain like so:

               1. B1 goes down, B2 and B3 are up. Server-quorum is still met. The file is modified by the client.
              2. B2 goes down, B1 comes back up. Server-quorum is met. Same file is modified by the client.
              3. We now have different contents for the file in B1 and B2 ==>split-brain.

               In the author's opinion, server-quorum is useful if you want to avoid split-brain of the volume(s) configuration across the nodes, and not in the I/O path. Unlike with client-quorum, where the volume becomes read-only when quorum is lost, loss of server-quorum on a particular node makes glusterd kill the brick processes on that node (for the participating volumes), making even reads impossible.

              "},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/#client-quorum","title":"Client Quorum","text":"

              Client-quorum is a feature implemented in AFR to prevent split-brains in the I/O path for replicate/distributed-replicate volumes. By default, if the client-quorum is not met for a particular replica subvol, it becomes unavailable. The other subvols (in a dist-rep volume) will still have R/W access.

              The following volume set options are used to configure it:

               Option: cluster.quorum-type. Default Value: none. Value Description: none|auto|fixed. If set to \"fixed\", this option allows writes to a file only if the number of active bricks in that replica set (to which the file belongs) is greater than or equal to the count specified in the 'quorum-count' option. If set to \"auto\", this option allows writes to the file only if the number of bricks that are up is >= ceil(total number of bricks that constitute that replica / 2). If the number of replicas is even, then there is a further check: if the number of up bricks is exactly equal to n/2, then the first brick must be one of the bricks that are up; if it is more than n/2, then it is not necessary that the first brick is one of the up bricks.

               Option: cluster.quorum-count. Value Description: The number of bricks that must be active in a replica set to allow writes. This option is used in conjunction with cluster.quorum-type=fixed to specify the number of bricks that must be active to satisfy quorum. If the quorum-type is auto, then this option has no significance.
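
               For example, a sketch of pinning the quorum to two bricks on a hypothetical volume testvol:

               gluster volume set testvol cluster.quorum-type fixed\ngluster volume set testvol cluster.quorum-count 2\n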

               Earlier, when quorum was not met, the replica subvolume turned read-only. But from glusterfs-3.13 onwards, the subvolume becomes unavailable, i.e. all the file operations fail with an ENOTCONN error instead of the volume turning read-only (EROFS). This means the cluster.quorum-reads volume option is also not supported.

              "},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/#replica-2-and-replica-3-volumes","title":"Replica 2 and Replica 3 volumes","text":"

               From the above descriptions, it is clear that client-quorum cannot really be applied to a replica 2 volume (without costing HA). If the quorum-type is set to auto, then by the description given earlier, the first brick must always be up, irrespective of the status of the second brick. In other words, if only the second brick is up, the subvol returns ENOTCONN, i.e. no HA. If quorum-type is set to fixed, then the quorum-count has to be two to prevent split-brains (otherwise a write can succeed on brick1 alone and another on brick2 alone, resulting in split-brain). So for all practical purposes, if you want high availability in a replica 2 volume, it is recommended not to enable client-quorum, as shown in the sketch below.
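
               A sketch of explicitly leaving client-quorum off on such a replica 2 volume (the volume name testvol is an assumption):

               gluster volume set testvol cluster.quorum-type none\n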

              In a replica 3 volume, client-quorum is enabled by default and set to 'auto'. This means 2 bricks need to be up for the write to succeed. Here is how this configuration prevents files from ending up in split-brain:

              Say B1, B2 and B3 are the bricks:

               1. B3 is down, quorum is met, and the write happens on the file on B1 and B2.
              2. B3 comes up, B2 is down, quorum is again met, write happens on B1 and B3.
              3. B2 comes up, B1 goes down, quorum is met. Now when a write is issued, AFR sees that B2 and B3's pending xattrs blame each other and therefore the write is not allowed and is failed with ENOTCONN.
              "},{"location":"Administrator-Guide/arbiter-volumes-and-quorum/#how-arbiter-works","title":"How Arbiter works","text":"

              There are 2 components to the arbiter volume. One is the arbiter xlator that is loaded in the brick process of every 3rd (i.e. the arbiter) brick. The other is the arbitration logic itself that is present in AFR (the replicate xlator) loaded on the clients.

              The former acts as a sort of 'filter' translator for the FOPS- i.e. it allows entry operations to hit POSIX, blocks certain inode operations like read (unwinds the call with ENOTCONN) and unwinds other inode operations like write, truncate etc. with success without winding it down to POSIX.

              The latter i.e. the arbitration logic present in AFR takes full file locks when writing to a file, just like in normal replica volumes. The behavior of arbiter volumes in allowing/failing write FOPS in conjunction with client-quorum can be summarized in the below steps:

              • If all 3 bricks are up (happy case), then there is no issue and the FOPs are allowed.

               • If 2 bricks are up and one of them is the arbiter (i.e. the 3rd brick), and it blames the other up brick for a given file, then all write FOPS will fail with ENOTCONN. This is because, in this scenario, the only true copy is on the brick that is down. Hence we cannot allow writes until that brick is also up. If the arbiter doesn't blame the other brick, FOPS will be allowed to proceed. 'Blaming' here is with respect to the values of the AFR changelog extended attributes.

               • If 2 bricks are up and the arbiter is down, then FOPS will be allowed. When the arbiter comes up, the entry and metadata heals to it take place. Data heals, of course, are not needed.

              • If only one brick is up, then client-quorum is not met and the volume returns ENOTCONN.

               • In all cases, if there is only one source before the FOP is initiated (even if all bricks are up) and the FOP fails on that source, the application will receive ENOTCONN. For example, assume that a write failed on B2 and B3, i.e. B1 is the only source. Now if, for some reason, a second write fails on B1 (before there was a chance for self-heal to complete, despite all bricks being up), the application will receive failure (ENOTCONN) for that write.

               A brick described as being down above does not necessarily mean the brick process is offline; it can also mean that the mount lost its connection to the brick due to network disconnects, etc.

              "},{"location":"Administrator-Guide/formatting-and-mounting-bricks/","title":"Formatting and Mounting Bricks","text":""},{"location":"Administrator-Guide/formatting-and-mounting-bricks/#formatting-and-mounting-bricks","title":"Formatting and Mounting Bricks","text":""},{"location":"Administrator-Guide/formatting-and-mounting-bricks/#creating-a-thinly-provisioned-logical-volume","title":"Creating a Thinly Provisioned Logical Volume","text":"

              To create a thinly provisioned logical volume, proceed with the following steps:

              1. Create a physical volume(PV) by using the pvcreate command. For example:

                pvcreate --dataalignment 128K /dev/sdb\n

                Here, /dev/sdb is a storage device. Use the correct dataalignment option based on your device.

                Note: The device name and the alignment value will vary based on the device you are using.

              2. Create a Volume Group (VG) from the PV using the vgcreate command: For example:

                vgcreate --physicalextentsize 128K gfs_vg /dev/sdb\n

                 It is recommended that only one VG be created from one storage device.

              3. Create a thin-pool using the following commands:

                1. Create an LV to serve as the metadata device using the following command:

                  lvcreate -L metadev_sz --name metadata_device_name VOLGROUP\n

                  For example:

                  lvcreate -L 16776960K --name gfs_pool_meta gfs_vg\n
                2. Create an LV to serve as the data device using the following command:

                  lvcreate -L datadev_sz --name thin_pool VOLGROUP\n

                  For example:

                  lvcreate -L 536870400K --name gfs_pool gfs_vg\n
                3. Create a thin pool from the data LV and the metadata LV using the following command:

                  lvconvert --chunksize STRIPE_WIDTH --thinpool VOLGROUP/thin_pool --poolmetadata VOLGROUP/metadata_device_name\n

                  For example:

                  lvconvert --chunksize 1280K --thinpool gfs_vg/gfs_pool --poolmetadata gfs_vg/gfs_pool_meta\n

                  Note: By default, the newly provisioned chunks in a thin pool are zeroed to prevent data leaking between different block devices. If desired, this zeroing can be turned off with the following command:

                  lvchange --zero n VOLGROUP/thin_pool\n

                  For example:

                  lvchange --zero n gfs_vg/gfs_pool\n
              4. Create a thinly provisioned volume from the previously created pool using the lvcreate command:

                For example:

                lvcreate -V 1G -T gfs_vg/gfs_pool -n gfs_lv\n

                 It is recommended that only one LV be created in a thin pool.

                Format bricks using the supported XFS configuration, mount the bricks, and verify the bricks are mounted correctly.

                 Run mkfs.xfs -f -i size=512 -n size=8192 -d su=128k,sw=10 DEVICE to format the bricks to the supported XFS file system format. Here, DEVICE is the thin LV (in this example, /dev/gfs_vg/gfs_lv). The inode size is set to 512 bytes to accommodate the extended attributes used by GlusterFS.

                 Run mkdir /mountpoint to create a directory on which to mount the brick.

                Add an entry in /etc/fstab:

                /dev/gfs_vg/gfs_lv    /mountpoint  xfs rw,inode64,noatime,nouuid      1 2\n

                Run mount /mountpoint to mount the brick.

                Run the df -h command to verify the brick is successfully mounted:

                # df -h\n/dev/gfs_vg/gfs_lv   16G  1.2G   15G   7% /exp1\n
              "},{"location":"Administrator-Guide/io_uring/","title":"io_uring support in gluster","text":"

              io_uring is an asynchronous I/O interface similar to linux-aio, but aims to be more performant. Refer https://kernel.dk/io_uring.pdf and https://kernel-recipes.org/en/2019/talks/faster-io-through-io_uring for more details.

              Incorporating io_uring in various layers of gluster is an ongoing activity but beginning with glusterfs-9.0, support has been added to the posix translator via the storage.linux-io_uring volume option. When this option is enabled, the posix translator in the glusterfs brick process (at the server side) will use io_uring calls for reads, writes and fsyncs as opposed to the normal pread/pwrite based syscalls.

              "},{"location":"Administrator-Guide/io_uring/#example","title":"Example:","text":"
              # gluster volume set testvol storage.linux-io_uring on\nvolume set: success\n\n# gluster volume set testvol storage.linux-io_uring off\nvolume set: success\n

               This option can be enabled or disabled only when the volume is not running; i.e. you can toggle it while the volume is in the Created or Stopped state, as indicated by gluster volume status $VOLNAME. A sketch of the workflow is shown below.
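
               A sketch of that workflow on a hypothetical running volume testvol (stop first, toggle the option, start again):

               gluster volume stop testvol\ngluster volume set testvol storage.linux-io_uring on\ngluster volume start testvol\n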

              "},{"location":"Administrator-Guide/overview/","title":"Overview","text":""},{"location":"Administrator-Guide/overview/#overview","title":"Overview","text":"

              The Administration guide covers day to day management tasks as well as advanced configuration methods for your Gluster setup.

               You can manage your Gluster cluster using the Gluster CLI.

              See the glossary for an explanation of the various terms used in this document.

              "},{"location":"Administrator-Guide/setting-up-storage/","title":"Setting Up Storage","text":"

              A volume is a logical collection of bricks where each brick is an export directory on a server in the trusted storage pool. Before creating a volume, you need to set up the bricks that will form the volume.

              • Brick Naming Conventions
              • Formatting and Mounting Bricks
               • POSIX ACLs
              "},{"location":"CLI-Reference/cli-main/","title":"Overview","text":""},{"location":"CLI-Reference/cli-main/#gluster-command-line-interface","title":"Gluster Command Line Interface","text":""},{"location":"CLI-Reference/cli-main/#overview","title":"Overview","text":"

               Use the Gluster CLI to set up and manage your Gluster cluster from a terminal. You can run the Gluster CLI on any Gluster server, either by invoking the commands directly or by running the Gluster CLI in interactive mode. You can also use the gluster command remotely over SSH.

              The gluster CLI syntax is gluster <command>.

              To run a command directly:

              gluster <command>\n

              For example, to view the status of all peers:

              gluster peer status\n

              To run a command in interactive mode, start a gluster shell by typing:

              gluster\n

               This will open a gluster command prompt. You can now run commands at the prompt.

              gluster> <command>\n

              For example, to view the status of all peers,

              gluster> peer status\n
              "},{"location":"CLI-Reference/cli-main/#peer-commands","title":"Peer Commands","text":"

              The peer commands are used to manage the Trusted Server Pool (TSP).

               • peer probe - Syntax: peer probe server - Description: Add server to the TSP
               • peer detach - Syntax: peer detach server - Description: Remove server from the TSP
               • peer status - Syntax: peer status - Description: Display the status of all nodes in the TSP
               • pool list - Syntax: pool list - Description: List all nodes in the TSP

               "},{"location":"CLI-Reference/cli-main/#volume-commands","title":"Volume Commands","text":"

               The volume commands are used to set up and manage Gluster volumes.

               • volume create - Syntax: volume create volname [options] bricks - Description: Create a volume called volname using the specified bricks with the configuration specified by options
               • volume start - Syntax: volume start volname [force] - Description: Start volume volname
               • volume stop - Syntax: volume stop volname - Description: Stop volume volname
               • volume info - Syntax: volume info [volname] - Description: Display volume info for volname if provided, else for all volumes on the TSP
               • volume status - Syntax: volume status [volname] - Description: Display volume status for volname if provided, else for all volumes on the TSP
               • volume list - Syntax: volume list - Description: List all volumes in the TSP
               • volume set - Syntax: volume set volname option value - Description: Set option to value for volname
               • volume get - Syntax: volume get volname <option|all> - Description: Display the value of option (if specified) for volname, or all options otherwise
               • volume add-brick - Syntax: volume add-brick volname brick-1 ... brick-n - Description: Expand volname to include the bricks brick-1 to brick-n
               • volume remove-brick - Syntax: volume remove-brick volname brick-1 ... brick-n \\<start|stop|status|commit|force> - Description: Shrink volname by removing the bricks brick-1 to brick-n. start will trigger a rebalance to migrate data from the removed bricks, stop will stop an ongoing remove-brick operation, and force will remove the bricks immediately, after which any data on them will no longer be accessible from Gluster clients
               • volume replace-brick - Syntax: volume replace-brick volname old-brick new-brick - Description: Replace old-brick of volname with new-brick
               • volume delete - Syntax: volume delete volname - Description: Delete volname

               For additional details on all the available CLI commands, please refer to the man gluster output.
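
               As a hedged end-to-end sketch (the hostnames server1-server3, the volume name demovol, and the brick paths are assumptions), a replica 3 volume could be created and started like this:

               gluster peer probe server2\ngluster peer probe server3\ngluster volume create demovol replica 3 server1:/bricks/brick1 server2:/bricks/brick1 server3:/bricks/brick1\ngluster volume start demovol\ngluster volume info demovol\n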

              "},{"location":"Contributors-Guide/Adding-your-blog/","title":"Adding your gluster blog","text":""},{"location":"Contributors-Guide/Adding-your-blog/#adding-your-blog","title":"Adding your blog","text":"

               As a developer or user, you may have blogged about Gluster and want to share the post with the Gluster community.

              OK, you can do that by editing planet-gluster feeds on Github.

              Please find instructions mentioned in the file and send a pull request.

              Once approved, all your gluster related posts will appear in planet.gluster.org website.

              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/","title":"Bug reporting guidelines","text":""},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#before-filing-an-issue","title":"Before filing an issue","text":"

               If you are facing any issues, these preliminary checks are useful:

              • Is SELinux enabled? (you can use getenforce to check)
              • Are iptables rules blocking any data traffic? (iptables -L can help check)
              • Are all the nodes reachable from each other? [ Network problem ]
              • Please search issues to see if the bug has already been reported

                • If an issue has been already filed for a particular release and you found the issue in another release, add a comment in issue.

               Anyone can search GitHub issues; you don't need an account. Searching requires some effort, but it helps avoid duplicates, and you may find that your problem has already been solved.

              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#reporting-an-issue","title":"Reporting An Issue","text":"
              • You should have an account with github.com
              • Here is the link to file an issue: Github

               Note: Please go through all the sections below to understand what information needs to go into a bug report. This will help the developers root-cause and fix the issue.

              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#required-information","title":"Required Information","text":"

              You should gather the information below before creating the bug report.

              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#package-information","title":"Package Information","text":"
              • Location from which the packages are used
              • Package Info - version of glusterfs package installed
              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#cluster-information","title":"Cluster Information","text":"
              • Number of nodes in the cluster
              • Hostnames and IPs of the gluster Node [if it is not a security issue]

                • Hostname / IP will help developers in understanding & correlating with the logs
              • Output of gluster peer status

              • Node IP, from which the \"x\" operation is done

                • \"x\" here means any operation that causes the issue
              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#volume-information","title":"Volume Information","text":"
              • Number of volumes
              • Volume Names
              • Volume on which the particular issue is seen [ if applicable ]
              • Type of volumes
              • Volume options if available
              • Output of gluster volume info
              • Output of gluster volume status
              • Get the statedump of the volume with the problem gluster volume statedump <vol-name>

               This dumps a statedump per brick process in /var/run/gluster.

              NOTE: Collect statedumps from one gluster Node in a directory.

               Repeat this on all nodes containing the bricks of the volume. All the directories collected this way can be archived, compressed, and attached to the bug, for example as sketched below.
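
               A possible way to package the statedumps on each node (the volume name testvol, the temporary paths, and the *.dump.* file-name pattern are assumptions; run the statedump command once from any node):

               gluster volume statedump testvol\nmkdir /tmp/statedump-$(hostname)\ncp /var/run/gluster/*.dump.* /tmp/statedump-$(hostname)/\ntar -czf /tmp/statedump-$(hostname).tar.gz -C /tmp statedump-$(hostname)\n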

              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#brick-information","title":"Brick Information","text":"
               • XFS options used when the brick partition was formatted

                • This could be obtained with this command: xfs_info /dev/mapper/vg1-brick
              • Extended attributes on the bricks

                • This could be obtained with this command: getfattr -d -m. -ehex /rhs/brick1/b1
              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#client-information","title":"Client Information","text":"
              • OS Type ( Ubuntu, Fedora, RHEL )
              • OS Version: In case of Linux distro get the following :
              uname -r\ncat /etc/issue\n
              • Fuse or NFS Mount point on the client with output of mount commands
              • Output of df -Th command
              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#tool-information","title":"Tool Information","text":"
              • If any tools are used for testing, provide the info/version about it
              • if any IO is simulated using a script, provide the script
              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#logs-information","title":"Logs Information","text":"
              • You can check logs for issues/warnings/errors.

                • Self-heal logs
                • Rebalance logs
                • Glusterd logs
                • Brick logs
                • NFS logs (if applicable)
                • Samba logs (if applicable)
                • Client mount log
               • Attach the entire logs if they are too large to paste as a comment (one way to package them is sketched below)
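
               One hedged way to gather all of them at once, assuming the default log directory /var/log/glusterfs:

               tar -czf gluster-logs-$(hostname).tar.gz /var/log/glusterfs\n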

              "},{"location":"Contributors-Guide/Bug-Reporting-Guidelines/#sos-report-for-centosfedora","title":"SOS report for CentOS/Fedora","text":"
              • Get the sosreport from the involved gluster Node and Client [ in case of CentOS /Fedora ]
               • Give the sosreport a meaningful name by adding the hostname/IP to the sosreport file name
              "},{"location":"Contributors-Guide/Bug-Triage/","title":"Issues Triage Guidelines","text":"
              • Triaging of issues is an important task; when done correctly, it can reduce the time between reporting an issue and the availability of a fix enormously.

               • Triagers should focus on new issues and try to describe the problem in a way that is easily understandable and as accurate as possible. The goal of the triagers is to reduce the time that developers need to solve the bug report.

               • A triager is like an assistant that helps with the information gathering and possibly the debugging of a new bug report. Because a triager helps prepare a bug before a developer gets involved, it can be a very nice role for new community members who are interested in the technical aspects of the software.

               • Triagers will stumble upon many different kinds of issues, ranging from reports about spelling mistakes or unclear log messages to memory leaks causing crashes or performance issues in environments with several hundred storage servers.

              Nobody expects that triagers can prepare all bug reports. Therefore most developers will be able to assist the triagers, answer questions and suggest approaches to debug and data to gather. Over time, triagers get more experienced and will rely less on developers.

              Issue triage can be summarized as below points:

              • Is the issue a bug? an enhancement request? or a question? Assign the relevant label.
              • Is there enough information in the issue description?
              • Is it a duplicate issue?
              • Is it assigned to correct component of GlusterFS?
               • Is the bug summary correct?
               • Assign the issue, or add people's GitHub handles in a comment, so they get notified.

              The detailed discussion about the above points are below.

              "},{"location":"Contributors-Guide/Bug-Triage/#is-there-enough-information","title":"Is there enough information?","text":"

               It's hard to generalize what makes a good report. For \"average\" reporters, it is definitely often helpful to have good steps to reproduce, the GlusterFS software version, and information about the test/production environment, such as the Linux/GNU distribution.

              If the reporter is a developer, steps to reproduce can sometimes be omitted as context is obvious. However, this can create a problem for contributors that need to find their way, hence it is strongly advised to list the steps to reproduce an issue.

              Other tips:

               • There should be only one issue per report. Try not to mix related or similar-looking bugs in one report.

              • It should be possible to call the described problem fixed at some point. \"Improve the documentation\" or \"It runs slow\" could never be called fixed, while \"Documentation should cover the topic Embedding\" or \"The page at http://en.wikipedia.org/wiki/Example should load in less than five seconds\" would have a criterion. A good summary of the bug will also help others in finding existing bugs and prevent filing of duplicates.

              • If the bug is a graphical problem, you may want to ask for a screenshot to attach to the bug report. Make sure to ask that the screenshot should not contain any confidential information.

              "},{"location":"Contributors-Guide/Bug-Triage/#is-it-a-duplicate","title":"Is it a duplicate?","text":"

              If you think that you have found a duplicate but you are not totally sure, just add a comment like \"This issue looks related to issue #NNN\" (and replace NNN by issue-id) so somebody else can take a look and help judging.

              "},{"location":"Contributors-Guide/Bug-Triage/#is-it-assigned-with-correct-label","title":"Is it assigned with correct label?","text":"

              Go through the labels and assign the appropriate label

              "},{"location":"Contributors-Guide/Bug-Triage/#are-the-fields-correct","title":"Are the fields correct?","text":""},{"location":"Contributors-Guide/Bug-Triage/#description","title":"Description","text":"

              Sometimes the description does not summarize the bug itself well. You may want to update the bug summary to make the report distinguishable. A good title may contain:

              • A brief explanation of the root cause (if it was found)
              • Some of the symptoms people are experiencing
              "},{"location":"Contributors-Guide/Bug-Triage/#assigning-issue-or-adding-peoples-github-handle-in-the-comment","title":"Assigning issue or Adding people's github handle in the comment","text":"

               Normally, developers and potential assignees of an area are already watching all the issues by default, but sometimes reports describe general issues. Only if you know developers who work in the area covered by the issue, and if you know that these developers accept being CCed or assigned to certain reports, should you mention them in a comment or even assign the bug report to them.

               To get an idea of who works in which area and to find the component owners, check the \"MAINTAINERS\" file in the root of the glusterfs code directory (see Simplified dev workflow).

              "},{"location":"Contributors-Guide/Bug-Triage/#bugs-present-in-multiple-versions","title":"Bugs present in multiple Versions","text":"

               During triaging you might come across a particular bug which is present across multiple versions of GlusterFS. Add that information in a comment.

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/","title":"Release Process for GlusterFS","text":"

              The GlusterFS release process aims to provide regular, stable releases, with the ability to also ship new features quickly, while also attempting to reduce the complexity for release maintainers.

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#glusterfs-releases","title":"GlusterFS releases","text":"

               GlusterFS major releases happen once every 4-6 months. Check the Release Schedule for more information on the schedule for major releases. Minor releases happen every month for the corresponding branch of a major release. Each major release is supported until the N+2 version is made available.

               Major releases don't guarantee complete backwards compatibility with the previous major release.

               Minor releases will have guaranteed backwards compatibility with earlier minor releases of the same branch.

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#glusterfs-major-release","title":"GlusterFS major release","text":"

              Each GlusterFS major release has a 4-6 month release window, in which changes get merged. This window is split into two phases.

               1. An Open phase, where all changes get merged
              2. A Stability phase, where only changes that stabilize the release get merged.

              The first 2-4 months of a release window will be the Open phase, and the last month will be the stability phase.

              The release engineer (or team doing the release) is responsible for messaging.

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#open-phase","title":"Open phase","text":"

               Any changes are accepted during this phase. New features that are introduced in this phase need to be capable of being selectively built. All changes in the master branch are automatically included in the next release.

              All changes will be accepted during the Open phase. The changes have a few requirements,

              • a change fixing a bug SHOULD have public test case
              • a change introducing a new feature MUST have a disable switch that can disable the feature during a build
              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#stability-phase","title":"Stability phase","text":"

              This phase is used to stabilize any new features introduced in the open phase, or general bug fixes for already existing features.

              A new release-<version> branch is created at the beginning of this phase. All changes need to be sent to the master branch before getting backported to the new release branch.

              No new features will be merged in this phase. At the end of this phase, any new feature introduced that hasn't been declared stable will be disabled, if possible removed, to prevent confusion and set clear expectations towards users and developers.

              Patches accepted in the Stability phase have the following requirements:

              • a change MUST fix an issue that users have reported or are very likely to hit
              • each change SHOULD have a public test-case (.t or DiSTAF)
              • a change MUST NOT add a new FOP
              • a change MUST NOT add a new xlator
              • a change SHOULD NOT add a new volume option, unless a public discussion was kept and several maintainers agree that this is the only right approach
              • a change MAY add new values for existing volume options, these need to be documented in the release notes and be marked as a 'minor feature enhancement' or similar
               • it is NOT RECOMMENDED to modify the contents of existing log messages, as automation and log parsers can depend on the phrasing
              • a change SHOULD NOT have more than approx. 100 lines changed, additional public discussion and agreement among maintainers is required to get big changes approved
              • a change SHOULD NOT modify existing structures or parameters that get sent over the network, unless a public discussion was kept and several maintainers agree that this is the only right approach
              • existing structures or parameters MAY get extended with additional values (i.e. new flags in a bitmap/mask) if the extensions are optional and do not affect older/newer client/server combinations

              Patches that do not satisfy the above requirements can still be submitted for review, but cannot be merged.

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#release-procedure","title":"Release procedure","text":"

              This procedure is followed by a release maintainer/manager, to perform the actual release.

              The release procedure for both major releases and minor releases is nearly the same.

              The procedure for the major releases starts at the beginning of the Stability phase, and for the minor release at the start of the release window.

              TODO: Add the release verification procedure

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#release-steps","title":"Release steps","text":"

              The release-manager needs to follow the following steps, to actually perform the release once ready.

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#create-tarball","title":"Create tarball","text":"
              1. Add the release-notes to the docs/release-notes/ directory in the sources
              2. after merging the release-notes, create a tag like v3.6.2
              3. push the tag to git.gluster.org
              4. create the tarball with the release job in Jenkins
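
               Steps 2 and 3 above might look like the following sketch (the tag value and the remote name origin are assumptions):

               git tag -a v3.6.2 -m 'glusterfs v3.6.2 release'\ngit push origin v3.6.2\n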
              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#notify-packagers","title":"Notify packagers","text":"

               Notify the packagers that we need packages created. Provide the link to the source tarball from the Jenkins release job to the packagers mailing list. A list of the people involved in the package maintenance for the different distributions is in the MAINTAINERS file in the sources; all of them should be subscribed to the packagers mailing list.

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#create-a-new-tracker-bug-for-the-next-release","title":"Create a new Tracker Bug for the next release","text":"

              The tracker bugs are used as guidance for blocker bugs and should get created when a release is made. To create one

              • Create a new milestone
              • base the contents on open issues, like the one for glusterfs-8
               • Issues that were not fixed in the previous release but are present in its milestone should be moved to the new milestone.
              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#create-release-announcement","title":"Create Release Announcement","text":"

              (Major releases) The Release Announcement is based off the release notes. This needs to indicate:

              • What this release's overall focus is
              • Which versions will stop receiving updates as of this release
              • Links to the direct download folder
              • Feature set

              Best practice as of version-8 is to create a collaborative version of the release notes that both the release manager and community lead work on together, and the release manager posts to the mailing lists (gluster-users@, gluster-devel@, announce@).

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#create-upgrade-guide","title":"Create Upgrade Guide","text":"

              (Major releases) If required, as in the case of a major release, an upgrade guide needs to be available at the same time as the release. This document should go under the Upgrade Guide section of the glusterdocs repository.

              "},{"location":"Contributors-Guide/GlusterFS-Release-process/#send-release-announcement","title":"Send Release Announcement","text":"

              Once the Fedora/EL RPMs are ready (and any others that are ready by then), send the release announcement:

              • Gluster Mailing lists

                • gluster-announce
                • gluster-devel
                • gluster-users
              • Gluster Blog The blog will automatically post to both Facebook and Twitter. Be careful with this!

                • Gluster Twitter account
                • Gluster Facebook page
              • Gluster LinkedIn group

              "},{"location":"Contributors-Guide/Guidelines-For-Maintainers/","title":"Guidelines For Maintainers","text":""},{"location":"Contributors-Guide/Guidelines-For-Maintainers/#guidelines-for-maintainers","title":"Guidelines For Maintainers","text":"

               GlusterFS has maintainers, sub-maintainers and release maintainers to manage the project's codebase. Sub-maintainers are the owners of specific areas/components of the source tree. Maintainers operate across all components in the source tree. Release maintainers are the owners of the various release branches (release-x.y) present in the GlusterFS repository.

              In the guidelines below, release maintainers and sub-maintainers are also implied when there is a reference to maintainers unless it is explicitly called out.

              "},{"location":"Contributors-Guide/Guidelines-For-Maintainers/#guidelines-that-maintainers-are-expected-to-adhere-to","title":"Guidelines that Maintainers are expected to adhere to","text":"
              1. Ensure qualitative and timely management of patches sent for review.

              2. For merging patches into the repository, it is expected of maintainers to:

                • Merge patches of owned components only.
                • Seek approvals from all maintainers before merging a patchset spanning multiple components.
                • Ensure that regression tests pass for all patches before merging.
                • Ensure that regression tests accompany all patch submissions.
                • Ensure the related Bug or GitHub Issue has sufficient details about the cause of the problem, or description of the introduction for the change.
                • Ensure that documentation is updated for a noticeable change in user perceivable behavior or design.
                • Encourage code unit tests from patch submitters to improve the overall quality of the codebase.
                • Not merge patches written by themselves until there is a +2 Code Review vote by other reviewers.
               3. The responsibility of merging a patch into a release branch in normal circumstances will be that of the release maintainer. Only in exceptional situations will maintainers & sub-maintainers merge patches into a release branch.

              4. Release maintainers will ensure approval from appropriate maintainers before merging a patch into a release branch.

              5. Maintainers have a responsibility to the community, it is expected of maintainers to:

                • Facilitate the community in all aspects.
                • Be very active and visible in the community.
                • Be objective and consider the larger interests of the community ahead of individual interests.
                • Be receptive to user feedback.
                • Address concerns & issues affecting users.
                • Lead by example.
              "},{"location":"Contributors-Guide/Guidelines-For-Maintainers/#queries-on-guidelines","title":"Queries on Guidelines","text":"

              Any questions or comments regarding these guidelines can be routed to gluster-devel or slack channel.

              "},{"location":"Contributors-Guide/Guidelines-For-Maintainers/#patches-in-github","title":"Patches in Github","text":"

              Github can be used to list patches that need reviews and/or can get merged from Pull Requests

              "},{"location":"Contributors-Guide/Index/","title":"Workflow Guide","text":""},{"location":"Contributors-Guide/Index/#bug-handling","title":"Bug Handling","text":"
              • Bug reporting guidelines - Guideline for reporting a bug in GlusterFS
              • Bug triage guidelines - Guideline on how to triage bugs for GlusterFS
              "},{"location":"Contributors-Guide/Index/#release-process","title":"Release Process","text":"
              • GlusterFS Release process - Our release process / checklist
              "},{"location":"Contributors-Guide/Index/#patch-acceptance","title":"Patch Acceptance","text":"
              • The Guidelines For Maintainers explains when maintainers can merge patches.
              "},{"location":"Contributors-Guide/Index/#blogging-about-gluster","title":"Blogging about gluster","text":"
              • The Adding your gluster blog explains how to add your gluster blog to Community blogger.
              "},{"location":"Developer-guide/Backport-Guidelines/","title":"Backport Guidelines","text":"

               In the GlusterFS project, as a policy, any new change, bug fix, etc., is to be fixed in the 'devel' branch before release branches. When a bug is fixed in the devel branch, it might be desirable or necessary to backport it to a release branch.

               This page describes the policy GlusterFS has regarding backports. As a user or contributor, being aware of this policy will help you understand how to request a backport from the community.

              "},{"location":"Developer-guide/Backport-Guidelines/#policy","title":"Policy","text":"
               • No features from devel would be backported to the release branches.
               • CVEs, i.e., security vulnerabilities (listed in the CVE database) reported in existing releases, would be backported after getting fixed in the devel branch.
               • Only fixes for issues that bring about data loss or unavailability would be backported to a release.
               • For any other issues, the project recommends that the installation be upgraded to a newer release where the specific bug has been addressed.
               • Gluster provides 'rolling' upgrade support, i.e., one can upgrade their server version without stopping application I/O, so we recommend migrating to a higher version.
              "},{"location":"Developer-guide/Backport-Guidelines/#things-to-pay-attention-to-while-backporting-a-patch","title":"Things to pay attention to while backporting a patch.","text":"

               If your patch meets the criteria above, or you are a user who would prefer to have a fix backported because your current setup is facing issues, below are the things you need to take care of when submitting a patch to a release branch.

• The patch should carry the same 'Change-Id' as the original patch on the devel branch (see the sketch below).
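
As a sketch, one common way to keep the Change-Id intact is to cherry-pick the merged devel commit with the -x option, which preserves the original commit message; the release branch name and commit hash below are placeholders:

git checkout release-X.Y\ngit cherry-pick -x <commit-hash-from-devel>\n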
              "},{"location":"Developer-guide/Backport-Guidelines/#how-to-contact-release-owners","title":"How to contact release owners?","text":"

All release owners are part of the 'gluster-devel@gluster.org' mailing list. Please write your expectations for the next release there, so we can take them into consideration while making the release.

              "},{"location":"Developer-guide/Building-GlusterFS/","title":"Building GlusterFS","text":"

              This page describes how to build and install GlusterFS.

              "},{"location":"Developer-guide/Building-GlusterFS/#build-requirements","title":"Build Requirements","text":"

              The following packages are required for building GlusterFS,

              • GNU Autotools

                • Automake
                • Autoconf
                • Libtool
              • lex (generally flex)

              • GNU Bison
              • OpenSSL
              • libxml2
              • Python 2.x
              • libaio
              • libibverbs
              • librdmacm
              • readline
              • lvm2
              • glib2
              • liburcu
              • cmocka
              • libacl
              • sqlite
              • fuse-devel
              • liburing-devel
              "},{"location":"Developer-guide/Building-GlusterFS/#fedora","title":"Fedora","text":"

              The following dnf command installs all the build requirements for Fedora,

              dnf install automake autoconf libtool flex bison openssl-devel  \\\n  libxml2-devel python-devel libaio-devel libibverbs-devel      \\\n  librdmacm-devel readline-devel lvm2-devel glib2-devel         \\\n  userspace-rcu-devel libcmocka-devel libacl-devel sqlite-devel \\\n  fuse-devel redhat-rpm-config rpcgen libtirpc-devel make       \\\n  libuuid-devel liburing-devel gperftools libcurl-devel\n
              "},{"location":"Developer-guide/Building-GlusterFS/#ubuntu","title":"Ubuntu","text":"

              The following apt-get command will install all the build requirements on Ubuntu,

              sudo apt-get install make automake autoconf libtool flex bison  \\\n  pkg-config libssl-dev libxml2-dev python-dev libaio-dev       \\\n  libibverbs-dev librdmacm-dev libreadline-dev liblvm2-dev      \\\n  libglib2.0-dev liburcu-dev libcmocka-dev libsqlite3-dev       \\\n  libacl1-dev liburing-dev google-perftools\n
              "},{"location":"Developer-guide/Building-GlusterFS/#centos-enterprise-linux-v7","title":"CentOS / Enterprise Linux v7","text":"

              The following yum command installs the build requirements for CentOS / Enterprise Linux 7,

              yum install autoconf automake bison cmockery2-devel dos2unix flex   \\\n  fuse-devel glib2-devel libacl-devel libaio-devel libattr-devel    \\\n  libcurl-devel libibverbs-devel librdmacm-devel libtirpc-devel     \\\n  libtool libxml2-devel lvm2-devel make openssl-devel pkgconfig     \\\n  pyliblzma python-devel python-eventlet python-netifaces           \\\n  python-paste-deploy python-simplejson python-sphinx python-webob  \\\n  pyxattr readline-devel rpm-build sqlite-devel systemtap-sdt-devel \\\n  tar userspace-rcu-devel\n

Note: You will need to enable the CentOS SIG repos in order to install the userspace-rcu-devel package. For details, check https://wiki.centos.org/SpecialInterestGroup/Storage
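
As an example, on CentOS 7 the Storage SIG repository can usually be enabled by installing its release package (verify against the wiki page above, since repository and package names change over time):

yum install centos-release-gluster\n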

              "},{"location":"Developer-guide/Building-GlusterFS/#enable-repositories-for-centos","title":"Enable repositories for CentOS","text":"

              The following yum command enables needed repositories providing the build requirements:

              CentOS Stream 8:

              yum-config-manager --enable powertools --enable devel\n\n

              CentOS Stream 9:

              yum-config-manager --enable crb --enable devel\n\n
              "},{"location":"Developer-guide/Building-GlusterFS/#centos-enterprise-linux-v8","title":"CentOS / Enterprise Linux v8","text":"

              The following yum command installs the build requirements for CentOS / Enterprise Linux 8,

              yum install autoconf automake bison dos2unix flex fuse-devel glib2-devel   \\\n  libacl-devel libaio-devel libattr-devel libcurl-devel libibverbs-devel   \\\n  librdmacm-devel libtirpc-devel libuuid-devel libtool libxml2-devel       \\\n  lvm2-devel make openssl-devel pkgconfig xz-devel  python3-devel          \\\n  python3-netifaces python3-paste-deploy python3-simplejson python3-sphinx \\\n  python3-webob python3-pyxattr readline-devel rpm-build sqlite-devel      \\\n  systemtap-sdt-devel tar userspace-rcu-devel rpcgen liburing-devel\n
              "},{"location":"Developer-guide/Building-GlusterFS/#building-from-source","title":"Building from Source","text":"

              This section describes how to build GlusterFS from source. It is assumed you have a copy of the GlusterFS source (either from a released tarball or a git clone). All the commands below are to be run with the source directory as the working directory.

              "},{"location":"Developer-guide/Building-GlusterFS/#configuring-for-building","title":"Configuring for building","text":"

              Run the below commands once for configuring and setting up the build process.

              Run autogen to generate the configure script.

              ./autogen.sh\n

              Once autogen completes successfully a configure script is generated. Run the configure script to generate the makefiles.

              ./configure\n

              For CentOS 7, use:

              ./configure --without-libtirpc\n

              If the above build requirements have been installed, running the configure script should give the below configure summary,

              GlusterFS configure summary\n===========================\nFUSE client          : yes\nInfiniband verbs     : yes\nepoll IO multiplex   : yes\nargp-standalone      : no\nfusermount           : yes\nreadline             : yes\ngeoreplication       : yes\nLinux-AIO            : yes\nEnable Debug         : no\nBlock Device xlator  : yes\nglupy                : yes\nUse syslog           : yes\nXML output           : yes\nEncryption xlator    : yes\nUnit Tests       : no\nTrack priv ports     : yes\nPOSIX ACLs           : yes\nData Classification  : yes\nSELinux features     : yes\nfirewalld-config     : no\nExperimental xlators : yes\nEvents           : yes\nEC dynamic support   : x64 sse avx\nUse memory pools     : yes\nNanosecond m/atimes  : yes\nLegacy gNFS server   : no\n

              During development it is good to enable a debug build. To do this run configure with a '--enable-debug' flag.

              ./configure --enable-debug\n

              Further configuration flags can be found by running configure with a '--help' flag,

              ./configure --help\n

Please note that to enable gNFS, use the following flag:

              ./configure --enable-gnfs\n

If you are looking at contributing by fixing some of the memory issues, use the --enable-asan option:

              ./configure --enable-asan\n

The above option builds with the -fsanitize=address -fno-omit-frame-pointer options and uses the libasan.so shared library, so that library needs to be available.
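
If the ASan runtime is not already present, it can usually be installed from your distribution's repositories; for example, on Fedora (the package name may differ on other distributions):

dnf install libasan\n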

io_uring was introduced in Linux kernel version 5.1. GlusterFS also needs the user-space liburing helper library. If these are not available for your machine, or if you wish to build GlusterFS without io_uring support, use the --disable-linux-io_uring option:

              ./configure --disable-linux-io_uring\n
              "},{"location":"Developer-guide/Building-GlusterFS/#building","title":"Building","text":"

              Once configured, GlusterFS can be built with a simple make command.

              make\n

              To speed up the build process on a multicore machine, add a '-jN' flag, where N is the number of parallel jobs.
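
For example, to run four parallel build jobs:

make -j4\n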

              "},{"location":"Developer-guide/Building-GlusterFS/#installing","title":"Installing","text":"

              Run 'make install' to install GlusterFS. By default, GlusterFS will be installed into '/usr/local' prefix. To change the install prefix, give the appropriate option to configure. If installing into the default prefix, you might need to use 'sudo' or 'su -c' to install.

              sudo make install\n

NOTE: glusterfs can be installed under any target path. However, the mount.glusterfs script has to be at /sbin/mount.glusterfs for mounting via the command mount -t glusterfs to work. See the -t section in man 8 mount for more details.
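
For example, once mount.glusterfs is available at /sbin/mount.glusterfs, a volume can be mounted like this (the server name, volume name, and mount point below are placeholders):

mount -t glusterfs server1:/testvol /mnt/glusterfs\n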

              "},{"location":"Developer-guide/Building-GlusterFS/#running-glusterfs","title":"Running GlusterFS","text":"

GlusterFS can only be run as root, so the following commands need to be run as root. If you've installed into the default '/usr/local' prefix, add '/usr/local/sbin' and '/usr/local/bin' to your PATH before running the commands below.
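
For example, to add the default install locations to PATH for the current shell session:

export PATH=/usr/local/sbin:/usr/local/bin:$PATH\n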

A source install will generally not install any init scripts, so you will need to start glusterd manually. To manually start glusterd, run:

              systemctl daemon-reload\nsystemctl start glusterd\n

This will start glusterd and fork it into the background as a daemon process. You can now run 'gluster' commands and make use of GlusterFS.

              "},{"location":"Developer-guide/Building-GlusterFS/#building-packages","title":"Building packages","text":""},{"location":"Developer-guide/Building-GlusterFS/#building-rpms","title":"Building RPMs","text":"

Building RPMs is really simple. On an RPM-based system, e.g. Fedora, get the source and do the configuration steps as shown in the 'Building from Source' section. After the configuration step, run the following commands to build the RPMs:

              cd extras/LinuxRPM\nmake glusterrpms\n

This will create RPMs from the source in 'extras/LinuxRPM'. (Note: You will need to install the rpmbuild requirements, including rpmbuild and mock.) For Fedora / CentOS / Enterprise Linux 8 the dependencies can be installed via:

              yum install mock rpm-build  selinux-policy-devel\n
              "},{"location":"Developer-guide/Developers-Index/","title":"Developers","text":""},{"location":"Developer-guide/Developers-Index/#contributing-to-the-gluster-community","title":"Contributing to the Gluster community","text":"

Are you itching to send in patches and participate as a developer in the Gluster community? Here are a number of starting points for getting involved. All you need is a handy GitHub account.

Remember that the Gluster community has multiple projects, each of which has its own way of handling PRs and patches. Decide which project you want to contribute to. The documents below are mostly about the 'GlusterFS' project, which is the core of the Gluster community.

              "},{"location":"Developer-guide/Developers-Index/#workflow","title":"Workflow","text":"
              • Simplified Developer Workflow

• A simpler and faster introduction to developing with GlusterFS than the document below
              • Developer Workflow

• Covers details about the requirements for a patch, and the tools and toolkits used by developers. This is recommended reading before beginning contributions to the project.
              • GD2 Developer Workflow

• Helps onboard developers to contribute to the GlusterD2 project.
              "},{"location":"Developer-guide/Developers-Index/#compiling-gluster","title":"Compiling Gluster","text":"
              • Building GlusterFS - How to compile Gluster from source code.
              "},{"location":"Developer-guide/Developers-Index/#developing","title":"Developing","text":"
              • Projects - Ideas for projects you could create
              • Fixing issues reported by tools for static code analysis

                • This is a good starting point for developers to fix bugs in GlusterFS project.
              "},{"location":"Developer-guide/Developers-Index/#releases-and-backports","title":"Releases and Backports","text":"
• The Backport Guidelines describe the policy and the steps to be followed to backport a fix to the release branches.

Some more GlusterFS developer documentation can be found in the glusterfs documentation directory.

              "},{"location":"Developer-guide/Development-Workflow/","title":"Development workflow of Gluster","text":"

              This document provides a detailed overview of the development model followed by the GlusterFS project. For a simpler overview visit Simplified development workflow.

              "},{"location":"Developer-guide/Development-Workflow/#basics","title":"Basics","text":"

The GlusterFS development model largely revolves around the features and functionality provided by the Git version control system, GitHub, and the Jenkins continuous integration system. This document is a primer for contributors to the project.

              "},{"location":"Developer-guide/Development-Workflow/#git-and-github","title":"Git and Github","text":"

              Git is an extremely flexible, distributed version control system. GlusterFS's main repository is at Git and at GitHub. A good introduction to Git can be found at http://www-cs-students.stanford.edu/~blynn/gitmagic/.

              "},{"location":"Developer-guide/Development-Workflow/#jenkins","title":"Jenkins","text":"

              Jenkins is a Continuous Integration build system. Jenkins is hosted at http://build.gluster.org. Jenkins is configured to work with Github by setting up hooks. Every \"Change\" which is pushed to Github is automatically picked up by Jenkins, built and smoke tested. The output of all builds and tests can be viewed at http://build.gluster.org/job/smoke/. Jenkins is also set up with a 'regression' job which is designed to execute test scripts provided as part of the code change.

              "},{"location":"Developer-guide/Development-Workflow/#preparatory-setup","title":"Preparatory Setup","text":"

              Here is a list of initial one-time steps before you can start hacking on code.

              "},{"location":"Developer-guide/Development-Workflow/#fork-repository","title":"Fork Repository","text":"

              Fork GlusterFS repository

              "},{"location":"Developer-guide/Development-Workflow/#clone-a-working-tree","title":"Clone a working tree","text":"

              Get yourself a working tree by cloning the development repository from

              git clone git@github.com:${username}/glusterfs.git\ncd glusterfs/\ngit remote add upstream git@github.com:gluster/glusterfs.git\n
              "},{"location":"Developer-guide/Development-Workflow/#preferred-email-and-set-username","title":"Preferred email and set username","text":"

On first login, add your git/work email to your identity. You will have to click on the URL sent to your email and set up a proper full name. Select a username for yourself. Make sure you set your git/work email as your preferred email; this should be the email address with which all your code commits are associated.

              "},{"location":"Developer-guide/Development-Workflow/#watch-glusterfs","title":"Watch glusterfs","text":"

In GitHub, watch the 'glusterfs' repository. Select a suitable notification type (All activity, Ignore, Participating, or Custom) to get alerts.

              "},{"location":"Developer-guide/Development-Workflow/#email-filters","title":"Email filters","text":"

              Set up a filter rule in your mail client to tag or classify emails with the header

              list: <glusterfs.gluster.github.com>\n

              as mails originating from the github system.

              "},{"location":"Developer-guide/Development-Workflow/#development-other-flows","title":"Development & Other flows","text":""},{"location":"Developer-guide/Development-Workflow/#issue","title":"Issue","text":"
              • Make sure there is an issue filed for the task you are working on.
• If it is not filed, open an issue with a complete description.
              • If it is a bug fix, add label \"Type:Bug\".
• If it is an RFC, provide all the documentation and request the \"DocApproved\" and \"SpecApproved\" labels.
              "},{"location":"Developer-guide/Development-Workflow/#code","title":"Code","text":"
              • Start coding
• Make sure clang-format is installed and run it on the patch.
              "},{"location":"Developer-guide/Development-Workflow/#keep-up-to-date","title":"Keep up-to-date","text":"
• GlusterFS is a large project with many developers, so new patches land almost every day.
• It is critical for developers to stay up to date with the devel branch so that their PR is conflict-free when it is opened.
• Git provides many options to keep up to date; below is one of them:
              git fetch upstream\ngit rebase upstream/devel\n
              "},{"location":"Developer-guide/Development-Workflow/#branching-policy","title":"Branching policy","text":"

This section describes both the branching policies on the public repo and the suggested best practice for local branching.

              "},{"location":"Developer-guide/Development-Workflow/#develrelease-branches","title":"Devel/release branches","text":"

In glusterfs, the 'devel' branch is the forward development branch. This is where new features come in first; in fact, this is where almost every change (commit) comes in first. The devel branch is always kept in a buildable state, and smoke tests must pass.

Release trains (3.1.z, 3.2.z, ..., 8.y, 9.y) each have a branch originating from devel. Code freeze of each new release train is marked by the creation of the release-x.y branch. At this point, no new features are added to the release-x.y branch. All fixes and commits first go into devel; from there, only bug fixes get backported to the relevant release branches. From the release-x.y branch, actual release code snapshots (e.g. glusterfs-3.2.2) are tagged (annotated tags created with 'git tag -a') and shipped as tarballs.

              "},{"location":"Developer-guide/Development-Workflow/#personal-per-task-branches","title":"Personal per-task branches","text":"

              As a best practice, it is recommended you perform all code changes for a task in a local branch in your working tree. The local branch should be created from the upstream branch to which you intend to submit the change. The name of the branch on your personal fork can start with issueNNNN, followed by anything of your choice. If you are submitting changes to the devel branch, first create a local task branch like this -

              # git checkout -b issueNNNN upstream/main\n... <hack, commit>\n
              "},{"location":"Developer-guide/Development-Workflow/#building","title":"Building","text":""},{"location":"Developer-guide/Development-Workflow/#environment-setup","title":"Environment Setup","text":"

              For details about the required packages for the build environment refer : Building GlusterFS

              "},{"location":"Developer-guide/Development-Workflow/#creating-build-environment","title":"Creating build environment","text":"

Once the required packages are installed for your system, generate the build configuration:

              ./autogen.sh\n./configure --enable-fusermount\n
              "},{"location":"Developer-guide/Development-Workflow/#build-and-install","title":"Build and install","text":"
              make && make install\n
              "},{"location":"Developer-guide/Development-Workflow/#commit-policy-pr-description","title":"Commit policy / PR description","text":"

              Typically you would have a local branch per task. You will need to sign-off your commit (git commit -s) before sending the patch for review. By signing off your patch, you agree to the terms listed under the \"Developer's Certificate of Origin\" section in the CONTRIBUTING file available in the repository root.

              Provide a meaningful commit message. Your commit message should be in the following format

              • A short one-line title of format 'component: title', describing what the patch accomplishes
              • An empty line following the subject
              • Situation necessitating the patch
              • Description of the code changes
              • Reason for doing it this way (compared to others)
              • Description of test cases
              • When you open a PR, having a reference Issue for the commit is mandatory in GlusterFS.
• The commit message can have either Fixes: #NNNN or Updates: #NNNN on a separate line, where NNNN is the Issue ID in the glusterfs repository (an illustrative commit message follows this list).
• Each commit needs the author's 'Signed-off-by:' line. You can add this with the -s option of git commit.
• If the PR is not ready for review, apply the label work-in-progress. If the \"Draft PR\" option is available to you, use that instead.
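
For illustration, a commit message following this format might look like the sketch below; the component, description, issue number, and author are all made up:

glusterd: fix crash when starting a volume with a missing brick path\n\nIf a brick directory was removed from the backend before the volume\nwas started, glusterd dereferenced a NULL pointer while generating\nthe volfile and crashed.\n\nValidate the brick path first and fail the start operation with a\nclear error message instead.\n\nFixes: #9999\nSigned-off-by: Jane Developer <jane@example.com>\n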
              • "},{"location":"Developer-guide/Development-Workflow/#push-the-change","title":"Push the change","text":"

After doing the local commit, it is time to submit the code for review. There is a script available inside glusterfs.git called rfc.sh. It is recommended you keep pushing to your repo every day, so you don't lose any work. You can submit your changes for review by simply executing

                ./rfc.sh\n

                or

                git push origin HEAD:issueNNN\n

                This script rfc.sh does the following:

                • The first time it is executed, it downloads a git hook from http://review.gluster.org/tools/hooks/commit-msg and sets it up locally to generate a Change-Id: tag in your commit message (if it was not already generated.)
                • Rebase your commit against the latest upstream HEAD. This rebase also causes your commits to undergo massaging from the just downloaded commit-msg hook.
• Prompt for a Reference Id for each commit (if it was not already provided) and include it as a \"fixes: #n\" tag in the commit log. You can just hit Enter at this prompt if your submission is purely for review purposes.
                • Push the changes for review. On a successful push, you will see a URL pointing to the change in Pull requests section.
                • "},{"location":"Developer-guide/Development-Workflow/#test-cases-and-verification","title":"Test cases and Verification","text":""},{"location":"Developer-guide/Development-Workflow/#auto-triggered-tests","title":"Auto-triggered tests","text":"

The integration between Jenkins and GitHub triggers an event in Jenkins on every push of changes; Jenkins picks up the change and runs a build and smoke tests on it. Part of the workflow is to aggregate and execute the pre-commit test cases that accompany patches, cumulatively for every new patch. This guarantees that tests that have been working until now are not broken by the new patch, and it ensures that code changes and accompanying test cases are reviewed together. Once you upload the patch -

1. All the required smoke tests would be auto-triggered. You can retrigger the smoke tests using \"/recheck smoke\" as a comment. Passing the automated smoke tests is a necessary condition, but not a sufficient one.

2. The regression tests would be triggered by a \"/run regression\" comment from developers in the @gluster organization once the smoke test has passed.

If smoke/regression fails, it is a good reason to skip code review till a fixed change is pushed later. You can click on the build URL to inspect the reason for the automated verification failure. On the Jenkins job page, you can click on the 'Console Output' link to see the exact point of failure.

                  All code changes which are not trivial (typo fixes, code comment changes) must be accompanied with either a new test case script or extend/modify an existing test case script. It is important to review the test case in conjunction with the code change to analyze whether the code change is actually verified by the test case.

Regression tests (i.e., execution of all test cases accumulated with every commit) are not automatically triggered, as the test suite is extensive and quite expensive to execute for every change submission in the review/resubmit cycle. Passing the regression test is a necessary condition for merge, along with code review points.

To check and run all regression tests locally, run the below script from the glusterfs root directory.

                  ./run-tests.sh\n

                  To run a single regression test locally, run the below command.

                  prove -vf <path_to_the_file>\n

NOTE: The testing framework needs the perl-Test-Harness package to be installed. Ask for help as a comment in the PR if you have any questions about the process!

                  It is important to note that Jenkins verification is only a generic verification of high-level tests. More concentrated testing effort for the patch is necessary with manual verification.

                  "},{"location":"Developer-guide/Development-Workflow/#glusto-test-framework","title":"Glusto test framework","text":"

For any new feature that is posted for review, there should be an accompanying set of tests in glusto-tests. These tests will be run nightly and/or before a release to determine the health of the feature. Please go through the glusto-tests project for more information on how to write and execute tests in Glusto.

                  1. Extend/Modify old test cases in existing scripts - This is typically when present behavior (default values etc.) of code is changed.

                  2. No test cases - This is typically when a code change is trivial (e.g. fixing typos in output strings, code comments).

                  3. Only test case and no code change - This is typically when we are adding test cases to old code (already existing before this regression test policy was enforced). More details on how to work with test case scripts can be found in tests/README.

                  "},{"location":"Developer-guide/Development-Workflow/#reviewing-commenting","title":"Reviewing / Commenting","text":"

Code review with GitHub is relatively easy compared to other available tools. Each change is presented as multiple files, and each file can be reviewed in side-by-side mode. While reviewing, it is possible to comment on each line by clicking on the '+' icon and writing your comments in the text box. Such in-line comments are saved as drafts until you finally publish them by starting a review.

                  "},{"location":"Developer-guide/Development-Workflow/#incorporate-rfcsh-reverify","title":"Incorporate, rfc.sh, Reverify","text":"

                  Code review comments are notified via email. After incorporating the changes in code, you can mark each of the inline comments as 'done' (optional). After all the changes to your local files, create new commits in the same branch with -

                  git commit -a -s\n

Push the commit by executing rfc.sh. If your previous push was an \"rfc\" push (i.e., without an Issue Id), you will be prompted for an Issue Id again. You can re-push an rfc change without any other code change too, by giving an Issue Id.

                  On the new push, Jenkins will re-verify the new change (independent of what the verification result was for the previous push).

                  It is the Change-Id line in the commit log (which does not change) that associates the new push as an update for the old push (even though they had different commit ids) under the same Change.

                  If further changes are found necessary, changes can be requested or comments can be made on the new patch as well, and the same cycle repeats.

                  If no further changes are necessary, the reviewer can approve the patch.

                  "},{"location":"Developer-guide/Development-Workflow/#submission-qualifiers","title":"Submission Qualifiers","text":"

The GlusterFS project follows the 'Squash and Merge' method.

                  • This is mainly to preserve the historic Gerrit method of one patch in git log for one URL link.
                  • This also makes every merge a complete patch, which has passed all tests.

                  For a change to get merged, there are two qualifiers that are enforced by the Github system. They are -

1. A change should have an approver flag from the reviewers.
                  2. A change should have passed smoke and regression tests.

The project maintainer will merge the changes once a patch meets these qualifiers. If you feel there is a delay, feel free to add a comment, discuss it in the Slack channel, or send an email.

                  "},{"location":"Developer-guide/Development-Workflow/#submission-disqualifiers","title":"Submission Disqualifiers","text":"
                  • +2 : is equivalent to \"Approve\" from the people in the maintainer's group.
                  • +1 : can be given by a maintainer/reviewer by explicitly stating that in the comment.
                  • -1 : provide details on required changes and pick \"Request Changes\" while submitting your review.
                  • -2 : done by adding the DO-NOT-MERGE label.

                  Any further discussions can happen as comments in the PR.

                  "},{"location":"Developer-guide/Easy-Fix-Bugs/","title":"Easy Fix Bugs","text":"

                  Fixing easy issues is an excellent method to start contributing patches to Gluster.

Sometimes an Easy Fix issue has a patch attached. In those cases, the Patch keyword has been added to the bug. These bugs can be used by new contributors who would like to verify their workflow. Bug 1099645 is one example of those.

                  All such issues can be found here

                  "},{"location":"Developer-guide/Easy-Fix-Bugs/#guidelines-for-new-comers","title":"Guidelines for new comers","text":"
                  • While trying to write a patch, do not hesitate to ask questions.
                  • If something in the documentation is unclear, we do need to know so that we can improve it.
                  • There are no stupid questions, and it's more stupid to not ask questions that others can easily answer. Always assume that if you have a question, someone else would like to hear the answer too.

Reach out to the developers in #gluster on the Gluster Slack channel, or on one of the mailing lists. Try to keep the discussions public so that anyone can learn from them.

                  "},{"location":"Developer-guide/Fixing-issues-reported-by-tools-for-static-code-analysis/","title":"Fixing issues reported by tools for static code analysis","text":""},{"location":"Developer-guide/Fixing-issues-reported-by-tools-for-static-code-analysis/#static-code-analysis-tools","title":"Static Code Analysis Tools","text":"

Bug fixes for issues reported by static code analysis tools should follow the Development Work Flow.

                  "},{"location":"Developer-guide/Fixing-issues-reported-by-tools-for-static-code-analysis/#coverity","title":"Coverity","text":"

                  GlusterFS is part of Coverity's scan program.

• To see Coverity issues, you have to be a member of the GlusterFS project on the Coverity scan website.
                  • Here is the link to Coverity scan website
• Go to the above link and subscribe to the GlusterFS project (as a contributor). This sends a request to the admins to include you in the project.
                  • Once admins for the GlusterFS Coverity scan approve your request, you will be able to see the defects raised by Coverity.
• Issue #1060 can be used as an umbrella bug for Coverity issues in the master branch, unless you are trying to fix a specific issue.
• When you decide to work on some issue, please assign it to yourself on the same Coverity website, so that we don't step on each other's work.
• When marking a bug intentional on the Coverity scan website, please add an explanation, so that others can understand the reasoning behind it.

If you have more questions, please send them to the gluster-devel mailing list.

                  "},{"location":"Developer-guide/Fixing-issues-reported-by-tools-for-static-code-analysis/#cpp-check","title":"CPP Check","text":"

                  Cppcheck is available in Fedora and EL's EPEL repo

                  • Install Cppcheck

                    dnf install cppcheck\n
                  • Clone GlusterFS code

                    git clone https://github.com/gluster/glusterfs\n
                  • Run Cpp check

                    cppcheck glusterfs/ 2>cppcheck.log\n
                  "},{"location":"Developer-guide/Fixing-issues-reported-by-tools-for-static-code-analysis/#clang-scan-daily-runs","title":"Clang-Scan Daily Runs","text":"

We have daily runs of the static source code analysis tool clang-scan on the glusterfs sources. There are daily analyses of the master branch and of the currently supported release branches.

                  Results are posted at https://build.gluster.org/job/clang-scan/lastBuild/clangScanBuildBugs/

Issue #1000 can be used as an umbrella bug for Clang issues in the master branch, unless you are trying to fix a specific issue.

                  "},{"location":"Developer-guide/Projects/","title":"Projects","text":"

This page contains a list of project ideas suitable for students (for GSoC, internships, etc.).

                  "},{"location":"Developer-guide/Projects/#projectsfeatures-which-needs-contributors","title":"Projects/Features which needs contributors","text":""},{"location":"Developer-guide/Projects/#rio","title":"RIO","text":"

                  Issue: https://github.com/gluster/glusterfs/issues/243

                  This is a new distribution logic, which can scale Gluster to 1000s of nodes.

                  "},{"location":"Developer-guide/Projects/#composition-xlator-for-small-files","title":"Composition xlator for small files","text":"

                  Merge small files into a designated large file using our own custom semantics. This can improve our small file performance.

                  "},{"location":"Developer-guide/Projects/#path-based-geo-replication","title":"Path based geo-replication","text":"

                  Issue: https://github.com/gluster/glusterfs/issues/460

This would allow the remote volume to be of a different type (NFS, S3, etc.) too.

                  "},{"location":"Developer-guide/Projects/#project-quota-support","title":"Project Quota support","text":"

                  Issue: https://github.com/gluster/glusterfs/issues/184

This will make Gluster's quota faster and also provide the desired behavior.

                  "},{"location":"Developer-guide/Projects/#cluster-testing-framework-based-on-gluster-tester","title":"Cluster testing framework based on gluster-tester","text":"

                  Repo: https://github.com/aravindavk/gluster-tester

Build a cluster using Docker images (or VMs). Write a tool which would extend the current Gluster testing .t format to take NODE as an additional parameter for the run command. This would make upgrade and downgrade testing very easy and feasible.

                  "},{"location":"Developer-guide/Projects/#network-layer-changes","title":"Network layer changes","text":"

                  Issue: https://github.com/gluster/glusterfs/issues/391

There are many improvements we can make in this area.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/","title":"Simplified development workflow for GlusterFS","text":"

                  This page gives a simplified model of the development workflow used by the GlusterFS project. This will give the steps required to get a patch accepted into the GlusterFS source.

Visit Development Work Flow for a more detailed description of the workflow.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#initial-preparation","title":"Initial preparation","text":"

The GlusterFS development workflow revolves around GitHub and Jenkins. Using both of these tools requires some initial preparation.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#get-the-source","title":"Get the source","text":"

                  Git clone the GlusterFS source using

                  git clone git@github.com:${username}/glusterfs.git\ncd glusterfs/\ngit remote add upstream git@github.com:gluster/glusterfs.git\n

                  This will clone the GlusterFS source into a subdirectory named glusterfs with the devel branch checked out.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#dev-system-setup","title":"Dev system setup","text":"

You should install and set up Git on your development system. Use your distribution-specific package manager to install git. After installation, configure git. At the minimum, set a git user name and email. To set them, do,

                  git config --global user.name <name>\ngit config --global user.email <email address>\n

                  Next, install the build requirements for GlusterFS. Refer Building GlusterFS - Build Requirements for the actual requirements.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#actual-development","title":"Actual development","text":"

                  The commands in this section are to be run inside the glusterfs source directory.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#create-a-development-branch","title":"Create a development branch","text":"

                  It is recommended to use separate local development branches for each change you want to contribute to GlusterFS. To create a development branch, first checkout the upstream branch you want to work on and update it. More details on the upstream branching model for GlusterFS can be found at Development Work Flow - Branching_policy. For example if you want to develop on the devel branch,

                  git checkout devel\ngit pull\n

                  Now, create a new branch from devel and switch to the new branch. It is recommended to have descriptive branch names. Do,

                  git branch issueNNNN\ngit checkout issueNNNN\n

                  or,

                  git checkout -b issueNNNN upstream/main\n

to do both in one command. Here, NNNN is the Issue ID in the glusterfs repository.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#hack","title":"Hack","text":"

                  Once you've switched to the development branch, you can perform the actual code changes. Build and test to see if your changes work.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#tests","title":"Tests","text":"

Unless your changes are very minor and trivial, you should also add a test for your change. Tests are used to ensure that the changes you make are not inadvertently broken later. More details on tests can be found at Development Workflow - Test cases and Development Workflow - Regression tests and test cases.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#regression-test","title":"Regression test","text":"

Once your change is working locally, you can run the regression test suite to make sure you haven't broken anything. The regression test suite requires a working GlusterFS installation and needs to be run as root. To run the regression test suite, do

                  make install\n./run-tests.sh\n

or, after uploading the patch, the regression tests can be triggered by a \"/run regression\" comment from developers in the @gluster organization.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#commit-your-changes","title":"Commit your changes","text":"

                  If you haven't broken anything, you can now commit your changes. First identify the files that you modified/added/deleted using git-status and stage these files.

                  git status\ngit add <list of modified files>\n

                  Now, commit these changes using

                  git commit -s\n

Provide a meaningful commit message. The commit message policy is described at Development Work Flow - Commit policy. It is essential that you commit with the '-s' option, which will sign off the commit with your configured email.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#submit-for-review","title":"Submit for review","text":"

                  To submit your change for review, run the rfc.sh script,

                  ./rfc.sh\n

                  or

                  git push origin HEAD:issueNNN\n

                  More details on the rfc.sh script are available at Development Work Flow - rfc.sh.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#review-process","title":"Review process","text":"

Your change will now be reviewed by the GlusterFS maintainers and component owners. You can follow and take part in the review process on the change at the review URL. The review process involves several steps.

To find the component owners, check the \"MAINTAINERS\" file in the root of the glusterfs source directory.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#automated-verification","title":"Automated verification","text":"

Every change submitted to GitHub triggers an initial automated verification on Jenkins, known as smoke tests. The automated verification ensures that your change doesn't break the build and has an associated bug-id. Developers get a chance to retrigger the smoke tests using \"/recheck smoke\" as a comment.

                  More details can be found at Development Work Flow - Auto verification.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#formal-review","title":"Formal review","text":"

Once the auto verification is successful, the component owners will perform a formal review. If they are okay with your change, they will give a positive review. If not, they will give a negative review and add comments explaining the reasons.

                  More information regarding the review qualifiers and disqualifiers is available at Development Work Flow - Submission Qualifiers and Development Work Flow - Submission Disqualifiers.

                  If your change gets a negative review, you will need to address the comments and resubmit your change.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#resubmission","title":"Resubmission","text":"

                  Switch to your development branch and make new changes to address the review comments. Build and test to see if the new changes are working.

                  Stage your changes and commit your new changes in new commits using,

                  git commit -a -s\n

                  Now you can resubmit the commit for review using the rfc.sh script or git push.

The formal review process could take a long time. To increase the chances of a speedy review, you can add the component owners as reviewers on the pull request. This will ensure they notice the change. The list of component owners can be found in the MAINTAINERS file present in the GlusterFS source.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#verification","title":"Verification","text":"

After a component owner has given a positive review, a developer will run the regression test suite on your change to verify that it works and hasn't broken anything. This verification is done with the help of Jenkins.

                  If the verification fails, you will need to make necessary changes and resubmit an updated commit for review.

                  "},{"location":"Developer-guide/Simplified-Development-Workflow/#acceptance","title":"Acceptance","text":"

                  After successful verification, a maintainer will Squash and merge your change into the upstream GlusterFS source. Your change will now be available in the upstream git repo for everyone to use.

                  "},{"location":"Developer-guide/compiling-rpms/","title":"Compiling RPMS","text":""},{"location":"Developer-guide/compiling-rpms/#how-to-compile-glusterfs-rpms-from-git-source-for-rhelcentos-and-fedora","title":"How to compile GlusterFS RPMs from git source, for RHEL/CentOS, and Fedora","text":"

Creating RPMs of GlusterFS from the git source is fairly easy, once you know the steps.

                  RPMs can be compiled on at least the following OS's:

                  • Red Hat Enterprise Linux 5, 6 (& 7 when available)
                  • CentOS 5, 6, 7 and 8
                  • Fedora 16-20

                  Specific instructions for compiling are below. If you're using:

                  • Fedora 16-20 - Follow the Fedora steps, then do all of the Common steps.
                  • CentOS 5.x - Follow the CentOS 5.x steps, then do all of the Common steps
                  • CentOS 6.x - Follow the CentOS 6.x steps, then do all of the Common steps.
                  • CentOS 8.x - Follow the CentOS 8.x steps, then follow from step 2 in the Common steps.
                  • RHEL 6.x - Follow the RHEL 6.x steps, then do all of the Common steps.

                  Note - these instructions have been explicitly tested on all of CentOS 5.10, RHEL 6.4, CentOS 6.4+, CentOS 8.4, and Fedora 16-20. Other releases of RHEL/CentOS and Fedora may work too but haven't been tested. Please update this page appropriately if you do so. :)

                  "},{"location":"Developer-guide/compiling-rpms/#preparation-steps-for-fedora-16-20-only","title":"Preparation steps for Fedora 16-20 (only)","text":"
                  1. Install gcc, the python development headers, and python setuptools:

                    sudo yum -y install gcc python-devel python-setuptools\n
                  2. If you're compiling GlusterFS version 3.4, then install python-swiftclient. Other GlusterFS versions don't need it:

                    sudo easy_install simplejson python-swiftclient\n

                  Now follow through with the Common Steps part below.

                  "},{"location":"Developer-guide/compiling-rpms/#preparation-steps-for-centos-5x-only","title":"Preparation steps for CentOS 5.x (only)","text":"

                  You'll need EPEL installed first and some CentOS-specific packages. The commands below will get that done for you. After that, follow through the \"Common steps\" section.

                  1. Install EPEL first:

                    curl -OL http://download.fedoraproject.org/pub/epel/5/x86_64/epel-release-5-4.noarch.rpm\nsudo yum -y install epel-release-5-4.noarch.rpm --nogpgcheck\n
                  2. Install the packages required only on CentOS 5.x:

                    sudo yum -y install buildsys-macros gcc ncurses-devel \\\n  python-ctypes python-sphinx10 redhat-rpm-config\n

                  Now follow through with the Common Steps part below.

                  "},{"location":"Developer-guide/compiling-rpms/#preparation-steps-for-centos-6x-only","title":"Preparation steps for CentOS 6.x (only)","text":"

                  You'll need EPEL installed first and some CentOS-specific packages. The commands below will get that done for you. After that, follow through the \"Common steps\" section.

                  1. Install EPEL first:

                    sudo yum -y install http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm\n
                  2. Install the packages required only on CentOS:

                    sudo yum -y install python-webob1.0 python-paste-deploy1.5 python-sphinx10 redhat-rpm-config\n

                  Now follow through with the Common Steps part below.

                  "},{"location":"Developer-guide/compiling-rpms/#preparation-steps-for-centos-8x-only","title":"Preparation steps for CentOS 8.x (only)","text":"

You'll need EPEL installed and the PowerTools repository enabled.

                  1. Install EPEL first:

                    sudo rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm\n
                  2. Enable the PowerTools repo and install CentOS 8.x specific packages for building the rpms.

                    sudo yum --enablerepo=PowerTools install automake autoconf libtool flex bison openssl-devel \\\n  libxml2-devel libaio-devel libibverbs-devel librdmacm-devel readline-devel lvm2-devel \\\n  glib2-devel userspace-rcu-devel libcmocka-devel libacl-devel sqlite-devel fuse-devel \\\n  redhat-rpm-config rpcgen libtirpc-devel make python3-devel rsync libuuid-devel \\\n  rpm-build dbench perl-Test-Harness attr libcurl-devel selinux-policy-devel -y\n

                  Now follow through from Point 2 in the Common Steps part below.

                  "},{"location":"Developer-guide/compiling-rpms/#preparation-steps-for-rhel-6x-only","title":"Preparation steps for RHEL 6.x (only)","text":"

                  You'll need EPEL installed first and some RHEL specific packages. The 2 commands below will get that done for you. After that, follow through the \"Common steps\" section.

                  1. Install EPEL first:

                    sudo yum -y install http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm\n
                  2. Install the packages required only on RHEL:

                    sudo yum -y --enablerepo=rhel-6-server-optional-rpms install python-webob1.0 \\\n  python-paste-deploy1.5 python-sphinx10 redhat-rpm-config\n

                  Now follow through with the Common Steps part below.

                  "},{"location":"Developer-guide/compiling-rpms/#common-steps","title":"Common Steps","text":"

                  These steps are for both Fedora and RHEL/CentOS. At the end you'll have the complete set of GlusterFS RPMs for your platform, ready to be installed.

                  NOTES for step 1 below:

                  • If you're on RHEL/CentOS 5.x and get a message about lvm2-devel not being available, it's ok. You can ignore it. :)
                  • If you're on RHEL/CentOS 6.x and get any messages about python-eventlet, python-netifaces, python-sphinx and/or pyxattr not being available, it's ok. You can ignore them. :)
                  • If you're on CentOS 8.x, you can skip step 1 and start from step 2. Also, for CentOS 8.x, the steps have been tested for the master branch. It is unknown if it would work for older branches.

                  1. Install the needed packages

                    sudo yum -y --disablerepo=rhs* --enablerepo=*optional-rpms install git autoconf \\\n  automake bison dos2unix flex fuse-devel glib2-devel libaio-devel \\\n  libattr-devel libibverbs-devel librdmacm-devel libtool libxml2-devel lvm2-devel make \\\n  openssl-devel pkgconfig pyliblzma python-devel python-eventlet python-netifaces \\\n  python-paste-deploy python-simplejson python-sphinx python-webob pyxattr readline-devel \\\n  rpm-build systemtap-sdt-devel tar libcmocka-devel\n
                  2. Clone the GlusterFS git repository

                    git clone git://git.gluster.org/glusterfs\ncd glusterfs\n
                  3. Choose which branch to compile

                    If you want to compile the latest development code, you can skip this step and go on to the next one. :)

                    If instead, you want to compile the code for a specific release of GlusterFS (such as v3.4), get the list of release names here:

                    # git branch -a | grep release\nremotes/origin/release-2.0\nremotes/origin/release-3.0\nremotes/origin/release-3.1\nremotes/origin/release-3.2\nremotes/origin/release-3.3\nremotes/origin/release-3.4\nremotes/origin/release-3.5\n

                    Then switch to the correct release using the git \"checkout\" command, and the name of the release after the \"remotes/origin/\" bit from the list above:

                    git checkout release-3.4\n

                    NOTE - The CentOS 5.x instructions have only been tested for the master branch in GlusterFS git. It is unknown (yet) if they work for branches older than release-3.5.

                    If you are compiling the latest development code you can skip steps 4 and 5. Instead, you can run the below command and you will get the RPMs.

                    extras/LinuxRPM/make_glusterrpms\n
                  4. Configure and compile GlusterFS

                    Now you're ready to compile Gluster:

                    ./autogen.sh\n./configure --enable-fusermount\nmake dist\n
                  5. Create the GlusterFS RPMs

                    cd extras/LinuxRPM\nmake glusterrpms\n

                    That should complete with no errors, leaving you with a directory containing the RPMs.

                    # ls -l *rpm\n-rw-rw-r-- 1 jc jc 3966111 Mar  2 12:15 glusterfs-3git-1.el5.centos.src.rpm\n-rw-rw-r-- 1 jc jc 1548890 Mar  2 12:17 glusterfs-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc   66680 Mar  2 12:17 glusterfs-api-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc   20399 Mar  2 12:17 glusterfs-api-devel-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc  123806 Mar  2 12:17 glusterfs-cli-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc 7850357 Mar  2 12:17 glusterfs-debuginfo-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc  112677 Mar  2 12:17 glusterfs-devel-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc  100410 Mar  2 12:17 glusterfs-fuse-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc  187221 Mar  2 12:17 glusterfs-geo-replication-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc  299171 Mar  2 12:17 glusterfs-libs-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc   44943 Mar  2 12:17 glusterfs-rdma-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc  123065 Mar  2 12:17 glusterfs-regression-tests-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc   16224 Mar  2 12:17 glusterfs-resource-agents-3git-1.el5.centos.x86_64.rpm\n-rw-rw-r-- 1 jc jc  654043 Mar  2 12:17 glusterfs-server-3git-1.el5.centos.x86_64.rpm\n
                  "},{"location":"Developer-guide/coredump-on-customer-setup/","title":"Get core dump on a customer set up without killing the process","text":""},{"location":"Developer-guide/coredump-on-customer-setup/#why-do-we-need-this","title":"Why do we need this?","text":"

Finding the root cause of an issue that occurred in a customer/production setup is a challenging task. Most of the time we cannot replicate/set up the environment and scenario leading to the issue on our test setup. In such cases, we have to gather most of the information from the system where the problem occurred.

                  "},{"location":"Developer-guide/coredump-on-customer-setup/#what-information-we-look-for-and-also-useful","title":"What information we look for and also useful?","text":"

Information like a core dump is very helpful for catching the root cause of an issue: we add ASSERT() in the code at the places where we suspect something is wrong and install the custom build on the affected setup. But the issue is that ASSERT() would kill the process while producing the core dump.

                  "},{"location":"Developer-guide/coredump-on-customer-setup/#is-it-a-good-idea-to-do-assert-on-customer-setup","title":"Is it a good idea to do ASSERT() on customer setup?","text":"

Remember, we are seeking help from a customer setup; they are unlikely to agree to kill the process and produce a core dump for us to root-cause the issue. It affects the customer's business, and nobody agrees to such a proposal.

                  "},{"location":"Developer-guide/coredump-on-customer-setup/#what-if-we-have-a-way-to-produce-a-core-dump-without-a-kill","title":"What if we have a way to produce a core dump without a kill?","text":"

Yes, GlusterFS provides a way to do this. Gluster has a customized ASSERT(), namely GF_ASSERT(), which helps to produce a core dump without killing the associated process, and it also provides a script which can be run on the customer setup to produce the core dump without harming the running process. (This presumes the build running on the customer setup already has GF_ASSERT() at the expected place; if not, we need to install a custom build with GF_ASSERT() added.)

                  "},{"location":"Developer-guide/coredump-on-customer-setup/#is-gf_assert-newly-introduced-in-gluster-code","title":"Is GF_ASSERT() newly introduced in Gluster code?","text":"

No. GF_ASSERT() was already present in the codebase before this improvement. In a debug build, GF_ASSERT() kills the process and produces a core dump, but in a production build it used to just log the error and move on. What has changed is the implementation: in a production build we now also get the core dump, but the process is not killed. For code paths that are not yet covered by GF_ASSERT(), please add it as required.

                  "},{"location":"Developer-guide/coredump-on-customer-setup/#here-are-the-steps-to-achieve-the-goal","title":"Here are the steps to achieve the goal:","text":"
                  • Add GF_ASSERT() in the Gluster code path where you expect something wrong is happening.
                  • Build the Gluster code, install and mount the Gluster volume (For detailed steps refer: Gluster quick start guide).
• Now, in another terminal, run the gfcore.py script: # ./extras/debug/gfcore.py $PID 1 /tmp/ (PID of the Gluster process you are interested in; you can get it with ps -ef | grep gluster. For more details, check # ./extras/debug/gfcore.py --help)
• Hit the code path where you have introduced GF_ASSERT(). If GF_ASSERT() is in the fuse_write() path, you can hit it by writing to a file under the Gluster mount, e.g. # dd if=/dev/zero of=/mnt/glusterfs/abcd bs=1M count=1, where /mnt/glusterfs is the Gluster mount
• Go back to the terminal where gdb is running (started by the gfcore.py script above) and observe that the gdb process has terminated
• Go to the directory where the core dump was produced. By default this is the present working directory.
• Access the core dump using gdb, e.g. # gdb -ex \"core-file $GFCORE_FILE\" $GLUSTER_BINARY (the first argument is the core file name; the second is the Gluster binary, which can be identified from the output of the file command on the core file)
• Observe that the Gluster process is unaffected by checking its process state: ps -ef | grep gluster (a consolidated sketch of these steps follows below)
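The following is a minimal, consolidated sketch of the workflow above. The PID (1234), mount point (/mnt/glusterfs), core file name and binary path are placeholders for illustration; substitute the values from your own setup.

# terminal 1: attach the helper to the process of interest (PID is hypothetical)
./extras/debug/gfcore.py 1234 1 /tmp/

# terminal 2: trigger the code path containing GF_ASSERT(), e.g. a write through the FUSE mount
dd if=/dev/zero of=/mnt/glusterfs/abcd bs=1M count=1

# back in terminal 1: once the assert fires, gdb exits and leaves a core file in /tmp
# (the binary path below is an assumption; identify it from the output of `file` on the core)
gdb -ex "core-file /tmp/<core-file-name>" /usr/sbin/glusterfsd

# confirm the original Gluster process is still alive
ps -ef | grep gluster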

                  Thanks, Xavi Hernandez(jahernan@redhat.com) for the idea. This will ease many Gluster developer's/maintainer\u2019s life.

                  "},{"location":"GlusterFS-Tools/","title":"GlusterFS Tools List","text":""},{"location":"GlusterFS-Tools/#glusterfs-tools","title":"GlusterFS Tools","text":"
                  • glusterfind
                  • gfind missing files
                  "},{"location":"GlusterFS-Tools/gfind-missing-files/","title":"gfind missing files","text":""},{"location":"GlusterFS-Tools/gfind-missing-files/#introduction","title":"Introduction","text":"

                  The tool gfind_missing_files.sh can be used to find the missing files in a GlusterFS geo-replicated secondary volume. The tool uses a multi-threaded crawler operating on the backend .glusterfs of a brickpath which is passed as one of the parameters to the tool. It does a stat on each entry in the secondary volume mount to check for the presence of a file. The tool uses the aux-gfid-mount thereby avoiding path conversions and potentially saving time.

                  This tool should be run on every node and each brickpath in a geo-replicated primary volume to find the missing files on the secondary volume.

                  The script gfind_missing_files.sh is a wrapper script that in turn uses the gcrawler binary to do the backend crawling. The script detects the gfids of the missing files and runs the gfid-to-path conversion script to list out the missing files with their full pathnames.

                  "},{"location":"GlusterFS-Tools/gfind-missing-files/#usage","title":"Usage","text":"
bash gfind_missing_files.sh <BRICK_PATH> <SECONDARY_HOST> <SECONDARY_VOL> <OUTFILE>
            BRICK_PATH     -   Full path of the brick
            SECONDARY_HOST -   Hostname of gluster volume
            SECONDARY_VOL  -   Gluster volume name
            OUTFILE        -   Output file which contains gfids of the missing files

                  The gfid-to-path conversion uses a quicker algorithm for converting gfids to paths and it is possible that in some cases all missing gfids may not be converted to their respective paths.

                  "},{"location":"GlusterFS-Tools/gfind-missing-files/#example-output126733-missing-files","title":"Example output(126733 missing files)","text":"
# ionice -c 2 -n 7 ./gfind_missing_files.sh /bricks/m3 acdc secondary-vol ~/test_results/m3-4.txt
Calling crawler...
Crawl Complete.
gfids of skipped files are available in the file /root/test_results/m3-4.txt
Starting gfid to path conversion
Path names of skipped files are available in the file /root/test_results/m3-4.txt_pathnames
WARNING: Unable to convert some GFIDs to Paths, GFIDs logged to /root/test_results/m3-4.txt_gfids
Use bash gfid_to_path.sh <brick-path> /root/test_results/m3-4.txt_gfids to convert those GFIDs to Path
Total Missing File Count : 126733

                  In such cases, an additional step is needed to convert those gfids to paths. This can be used as shown below:

bash gfid_to_path.sh <BRICK_PATH> <GFID_FILE>
             BRICK_PATH - Full path of the brick.
             GFID_FILE  - OUTFILE_gfids got from gfind_missing_files.sh
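Continuing the example run shown above, the follow-up conversion would look like this (the brick path and gfid file are taken from that example output):

bash gfid_to_path.sh /bricks/m3 /root/test_results/m3-4.txt_gfids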
                  "},{"location":"GlusterFS-Tools/gfind-missing-files/#things-to-keep-in-mind-when-running-the-tool","title":"Things to keep in mind when running the tool","text":"
                  1. Running this tool can result in a crawl of the backend filesystem at each brick which can be intensive. To ensure there is no impact on ongoing I/O on RHS volumes, we recommend that this tool be run at a low I/O scheduling class (best-effort) and priority.

                     ionice -c 2 -p <pid of gfind_missing_files.sh>\n
2. We do not recommend interrupting the tool while it is running (e.g. by pressing Ctrl+C). It is better to wait for the tool to finish execution. In case it is interrupted, manually unmount the Secondary Volume.

                     umount <MOUNT_POINT>\n
                  "},{"location":"GlusterFS-Tools/glusterfind/","title":"glusterfind - A tool to find Modified files/dirs","text":"

glusterfind is a tool which helps to get a full or incremental list of files/dirs from a GlusterFS Volume using Changelog/Find. In Gluster volumes, detecting modified files is challenging: a readdir on a directory leads to multiple network calls, since the files in a directory are distributed across nodes.

This tool should be run on one of the nodes; it fetches the Volume info to get the list of nodes and brick paths. For each brick, it spawns a process that runs the crawler command on the respective node. The crawler runs on the brick filesystem (xfs, ext4, etc.) and not on the Gluster mount. The crawler generates an output file with the list of files modified after the last run or after the session creation.

                  "},{"location":"GlusterFS-Tools/glusterfind/#session-management","title":"Session Management","text":"

Create a glusterfind session to remember the time when the last sync or processing completed. For example, your backup application runs every day and gets incremental results on each run. The tool maintains sessions in $GLUSTERD_WORKDIR/glusterfind/; for each session it creates a directory and, inside it, a sub directory with the Volume name. (The default working directory is /var/lib/glusterd; on some systems this location may differ. To find the working directory location run

                  grep working-directory /etc/glusterfs/glusterd.vol\n

                  or

                  grep working-directory /usr/local/etc/glusterfs/glusterd.vol\n

                  if you installed from the source.

For example, if the session name is \"backup\" and the volume name is \"datavol\", then the tool creates $GLUSTERD_WORKDIR/glusterfind/backup/datavol. From here on we refer to this directory as $SESSION_DIR.

                  create => pre => post => [delete]\n

Once the session is created, we can run the tool in two steps, Pre and Post. To collect the list of files modified after the create time or the last run time, call the pre command. The pre command finds the modified files and generates the output file. The consumer can check the exit code of the pre command and start processing those files. As a post-processing step, run the post command to update the session time to that of the latest run.

For example, a backup utility runs the Pre command and gets the list of changed files/directories, syncs those files to the backup target, and then informs glusterfind by calling the Post command (see the sketch below).
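A minimal sketch of one such cycle, assuming a session named backup and a volume named datavol (the output file path is illustrative):

glusterfind create backup datavol            # once, to create the session
glusterfind pre backup datavol /root/backup-files.txt
# ... sync the files listed in /root/backup-files.txt to the backup target ...
glusterfind post backup datavol              # record this run's time for the next incremental crawl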

At the end of the Pre command, the status file $SESSION_DIR/status.pre is created. The pre status file stores the time at which the current crawl started; the crawl collects all files/dirs modified up to that time. Once Post is called, $SESSION_DIR/status.pre is renamed to $SESSION_DIR/status, and the content of this file is used as the start time for the next crawl.

During Pre, we can force the tool to do a full find instead of an incremental find. The tool uses the find command on the brick backend to get the list of files/dirs.

When glusterfind create is run, the node generates an ssh key ($GLUSTERD_WORKDIR/glusterfind.secret.pem) and distributes it to all peers via Glusterd. Once the ssh key is distributed in the trusted pool, the tool can run ssh commands and copy files from the other Volume nodes.

When glusterfind pre is run, it internally runs gluster volume info to get the list of nodes and their respective brick paths. For each brick, it calls the respective node agent via ssh to find the modified files/dirs which are local to it. Once each node agent generates its output file, glusterfind collects all the files via scp and merges them into the given output file.

                  When glusterfind post is run, it renames $SESSION_DIR/status.pre file to $SESSION_DIR/status.

                  "},{"location":"GlusterFS-Tools/glusterfind/#changelog-mode-and-gfid-to-path-conversion","title":"Changelog Mode and GFID to Path conversion","text":"

Incremental find uses Changelogs to get the list of GFIDs modified/created. Any application expects file paths instead of GFIDs, but there is no standard or easy way to convert a GFID to a path.

If we set the build-pgfid option on a Volume, GlusterFS starts recording each file's parent directory GFID as an xattr on the file on any ENTRY fop.

                  trusted.pgfid.<GFID>=NUM_LINKS\n

                  To convert from GFID to path, we can mount Volume with aux-gfid-mount option, and get Path information by a getfattr query.

                  getfattr -n glusterfs.ancestry.path -e text /mnt/datavol/.gfid/<GFID>\n
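Putting the two pieces together, a minimal sketch of enabling the option and mounting with aux-gfid-mount (the volume name datavol, server node01 and mount point /mnt/datavol are assumptions for illustration):

gluster volume set datavol build-pgfid on
mount -t glusterfs -o aux-gfid-mount node01:/datavol /mnt/datavol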

This approach is slow: for a requested file it fetches the parent GFID via the xattr and then reads that directory to find the entry whose inode number matches that of the GFID file. To improve performance, glusterfind still relies on the build-pgfid option, but instead of using getfattr on the mount it gets the details from the brick backend. glusterfind collects all parent GFIDs at once and starts crawling each directory. Instead of processing one GFID-to-path conversion at a time, it gets the inode numbers of all input GFIDs and filters them while reading the parent directory.

The above method is fast compared to find -samefile, since it crawls only the required directories to find files with the same inode number as the GFID file. However, pgfid information is only available once a lookup or any ENTRY fop is made on a file after build-pgfid is enabled. Files created before build-pgfid was enabled will not get converted from GFID to path with this approach.

The tool collects the list of GFIDs that failed to convert with the above method and does a full crawl to convert them to paths. The find command is used to crawl the entire namespace. Instead of calling find for every GFID, glusterfind uses an efficient approach that converts all GFIDs to paths with a single call to find.

                  "},{"location":"GlusterFS-Tools/glusterfind/#usage","title":"Usage","text":""},{"location":"GlusterFS-Tools/glusterfind/#create-the-session","title":"Create the session","text":"
glusterfind create SESSION_NAME VOLNAME [--force]
glusterfind create --help

Here, SESSION_NAME is any name (without spaces) used to identify the session on subsequent runs. When a node is added to the Volume, the tool expects the ssh keys to be copied to the new node(s) as well; run the create command with --force to distribute the keys again.

                  Examples,

# glusterfind create --help
# glusterfind create backup datavol
# glusterfind create antivirus_scanner datavol
# glusterfind create backup datavol --force
                  "},{"location":"GlusterFS-Tools/glusterfind/#pre-command","title":"Pre Command","text":"
glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE
glusterfind pre --help

The session already holds the remaining details (nodes and brick paths). The list of files will be populated in OUTFILE.

To trigger a full find, call the pre command with the --full argument. Multiple crawlers are available for incremental find; the crawl type can be chosen with the --crawl argument.

                  Examples,

# glusterfind pre backup datavol /root/backup.txt
# glusterfind pre backup datavol /root/backup.txt --full

# # Changelog based crawler, works only for incremental
# glusterfind pre backup datavol /root/backup.txt --crawler=changelog

# # Find based crawler, works for both full and incremental
# glusterfind pre backup datavol /root/backup.txt --crawler=brickfind

The output file contains the list of files/dirs relative to the Volume mount. If we need to prefix a path to get absolute paths, then:

                  glusterfind pre backup datavol /root/backup.txt --file-prefix=/mnt/datavol/\n
                  "},{"location":"GlusterFS-Tools/glusterfind/#list-command","title":"List Command","text":"

                  To get the list of sessions and respective session time,

                  glusterfind list [--session SESSION_NAME] [--volume VOLUME_NAME]\n

                  Examples,

# glusterfind list
# glusterfind list --session backup

                  Example output,

SESSION                   VOLUME                    SESSION TIME
---------------------------------------------------------------------------
backup                    datavol                   2015-03-04 17:35:34
                  "},{"location":"GlusterFS-Tools/glusterfind/#post-command","title":"Post Command","text":"
                  glusterfind post SESSION_NAME VOLUME_NAME\n

                  Examples,

                  glusterfind post backup datavol\n
                  "},{"location":"GlusterFS-Tools/glusterfind/#delete-command","title":"Delete Command","text":"
                  glusterfind delete SESSION_NAME VOLUME_NAME\n

                  Examples,

                  glusterfind delete backup datavol\n
                  "},{"location":"GlusterFS-Tools/glusterfind/#adding-more-crawlers","title":"Adding more Crawlers","text":"

Adding more crawlers is very simple: add an entry in $GLUSTERD_WORKDIR/glusterfind.conf, and glusterfind can then choose your crawler using the --crawl argument.

[crawlers]
changelog=/usr/libexec/glusterfs/glusterfind/changelog.py
brickfind=/usr/libexec/glusterfs/glusterfind/brickfind.py

For example, if you have a multithreaded brick crawler, say parallelbrickcrawl, add it to the conf file.

[crawlers]
changelog=/usr/libexec/glusterfs/glusterfind/changelog.py
brickfind=/usr/libexec/glusterfs/glusterfind/brickfind.py
parallelbrickcrawl=/root/parallelbrickcrawl

A custom crawler can be any executable script or binary which accepts the session name, volume name, brick path, output file and start time (and an optional debug flag), as in the example below.

                  For example,

                  /root/parallelbrickcrawl SESSION_NAME VOLUME BRICK_PATH OUTFILE START_TIME [--debug]\n

Here START_TIME is in unix epoch format; it will be zero for a full find.
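As a rough illustration only, a custom crawler stub might look like the following. This is a sketch of the calling convention, not a working crawler: it assumes GNU find, ignores the exact output format glusterfind expects, and the path /root/parallelbrickcrawl is hypothetical.

#!/bin/bash
# Hypothetical /root/parallelbrickcrawl: list brick entries changed since START_TIME.
SESSION_NAME=$1; VOLUME=$2; BRICK_PATH=$3; OUTFILE=$4; START_TIME=$5
if [ "$START_TIME" -eq 0 ]; then
    # full find: list everything except the internal .glusterfs directory
    find "$BRICK_PATH" -path "$BRICK_PATH/.glusterfs" -prune -o -print > "$OUTFILE"
else
    # incremental: list entries modified after START_TIME (GNU find date syntax)
    find "$BRICK_PATH" -path "$BRICK_PATH/.glusterfs" -prune -o -newermt "@$START_TIME" -print > "$OUTFILE"
fi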

                  "},{"location":"GlusterFS-Tools/glusterfind/#known-issues","title":"Known Issues","text":"
1. Deleted files will not get listed, since we cannot convert a GFID to a path if the file/dir has been deleted.
2. Only the new name will get listed if a file is renamed.
                  3. All hardlinks will get listed.
                  "},{"location":"Install-Guide/Common-criteria/","title":"Common Criteria","text":""},{"location":"Install-Guide/Common-criteria/#getting-started","title":"Getting Started","text":"

                  This tutorial will cover different options for getting a Gluster cluster up and running. Here is a rundown of the steps we need to do.

                  To start, we will go over some common things you will need to know for setting up Gluster.

                  Next, choose the method you want to use to set up your first cluster:

                  • Within a virtual machine
                  • To bare metal servers
                  • To EC2 instances in Amazon

                  Finally, we will install Gluster, create a few volumes, and test using them.

                  "},{"location":"Install-Guide/Common-criteria/#general-setup-principles","title":"General Setup Principles","text":"

                  No matter where you will be installing Gluster, it helps to understand a few key concepts on what the moving parts are.

                  First, it is important to understand that GlusterFS isn\u2019t really a filesystem in and of itself. It concatenates existing filesystems into one (or more) big chunks so that data being written into or read out of Gluster gets distributed across multiple hosts simultaneously. This means that you can use space from any host that you have available. Typically, XFS is recommended but it can be used with other filesystems as well. Most commonly EXT4 is used when XFS isn\u2019t, but you can (and many, many people do) use another filesystem that suits you.

                  Now that we understand that, we can define a few of the common terms used in Gluster.

                  • A trusted pool refers collectively to the hosts in a given Gluster Cluster.
                  • A node or \u201cserver\u201d refers to any server that is part of a trusted pool. In general, this assumes all nodes are in the same trusted pool.
                  • A brick is used to refer to any device (really this means filesystem) that is being used for Gluster storage.
                  • An export refers to the mount path of the brick(s) on a given server, for example, /export/brick1
                  • The term Global Namespace is a fancy way of saying a Gluster volume
                  • A Gluster volume is a collection of one or more bricks (of course, typically this is two or more). This is analogous to /etc/exports entries for NFS.
                  • GNFS and kNFS. GNFS is how we refer to our inline NFS server. kNFS stands for kernel NFS, or, as most people would say, just plain NFS. Most often, you will want kNFS services disabled on the Gluster nodes. Gluster NFS doesn't take any additional configuration and works just like you would expect with NFSv3. It is possible to configure Gluster and NFS to live in harmony if you want to.

                  Other notes:

                  • For this test, if you do not have DNS set up, you can get away with using /etc/hosts entries for the two nodes. However, when you move from this basic setup to using Gluster in production, correct DNS entries (forward and reverse) and NTP are essential.
                  • When you install the Operating System, do not format the Gluster storage disks! We will use specific settings with the mkfs command later on when we set up Gluster. If you are testing with a single disk (not recommended), make sure to carve out a free partition or two to be used by Gluster later, so that you can format or reformat at will during your testing.
                  • Firewalls are great, except when they aren\u2019t. For storage servers, being able to operate in a trusted environment without firewalls can mean huge gains in performance, and is recommended. In case you absolutely need to set up a firewall, have a look at Setting up clients for information on the ports used.

                  Click here to get started

                  "},{"location":"Install-Guide/Community-Packages/","title":"Community Packages","text":""},{"location":"Install-Guide/Community-Packages/#community-packages","title":"Community Packages","text":""},{"location":"Install-Guide/Community-Packages/#glusterfs","title":"GlusterFS","text":"

                  Tentative plans for community convenience packages.

                  A yes means packages are (or will be) provided in the respective repository. A no means no plans to build new updates. Existing packages will remain in the repos. The following GlusterFS versions have reached EOL[1]: 8, 7, 6 and earlier.

                              11      10
CentOS Storage SIG[2]
  Stream 8                    yes     yes
  Stream 9                    yes     yes
Fedora[3]
  F37                         yes     yes¹
  F38                         yes¹    yes
  F39(rawhide)                yes¹    yes
Debian[3]
  Buster/10                   yes     yes
  Bullseye/11                 yes     yes
  Bookworm/12                 yes     yes
  Trixie/13(sid)              yes     no
Ubuntu Launchpad[4]
  Bionic/18.04                yes     yes
  Focal/20.04                 yes     yes
  Jammy/22.04                 yes     yes
  Kinetic/22.10               yes     yes
  Lunar/23.04                 yes     yes
  Mantic/23.10                yes     no
OpenSUSE Build Service[5]
  Leap15.4                    yes     yes
  SLES15SP4                   yes     yes
  Tumbleweed                  yes     yes

NOTE - Due to resource constraints, we have not been building Debian arm packages for a while now; only amd64 packages are present on download.gluster.org.

                  "},{"location":"Install-Guide/Community-Packages/#related-packages","title":"Related Packages","text":"glusterfs-selinux gdeploy gluster-block glusterfs-coreutils nfs-ganesha Samba CentOS Storage SIG[2] Stream 8 yes tbd yes yes yes yes Stream 9 yes tbd yes yes yes yes Fedora[3] F37 yes yes yes yes yes ? F38 yes yes yes yes yes ? F39(rawhide) yes yes yes yes yes ? Debian[3] Buster/10 n/a no no yes yes ? Bullseye/11 n/a no no yes yes ? Bookworm/12 n/a no no yes yes ? Trixie/13(sid) n/a no no yes yes ? Ubuntu Launchpad[4] Bionic/18.04 n/a no no yes yes ? Focal/20.04 n/a no no yes yes ? Jammy/22.04 n/a no no yes yes ? Kinetic/22.10 n/a no no yes yes ? Lunar/23.04 n/a no no yes yes ? Mantic/23.10 n/a no no yes yes ? OpenSUSE Build Service[5] Leap15.4 n/a yes yes yes yes ? SLES15SP4 n/a yes yes yes yes ? Tumbleweed n/a yes yes yes yes ?

                  [1] https://www.gluster.org/release-schedule/ [2] https://wiki.centos.org/SpecialInterestGroup/Storage [3] https://download.gluster.org/pub/gluster/glusterfs [4] https://launchpad.net/~gluster [5] http://download.opensuse.org/repositories/home:/glusterfs:/

¹ Fedora Updates, UpdatesTesting, or Rawhide repository. Use dnf to install.

                  "},{"location":"Install-Guide/Configure/","title":"Configure","text":""},{"location":"Install-Guide/Configure/#configure-firewall","title":"Configure Firewall","text":"

For Gluster nodes to communicate within a cluster, either the firewalls have to be turned off or communication has to be enabled for each server.

                  iptables -I INPUT -p all -s `<ip-address>` -j ACCEPT\n
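If you would rather keep the firewall enabled, an alternative sketch using firewalld is shown below. The port numbers are the commonly used defaults (24007-24008 for the management daemons and one port per brick starting at 49152); verify them against the documentation for your version.

firewall-cmd --permanent --add-port=24007-24008/tcp
firewall-cmd --permanent --add-port=49152-49251/tcp   # adjust the range to the number of bricks
firewall-cmd --reload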
                  "},{"location":"Install-Guide/Configure/#configure-the-trusted-pool","title":"Configure the trusted pool","text":"

Remember that the trusted pool is the term used to define a cluster of nodes in Gluster. Choose a server to be your "primary" server. This is just to keep things simple; you will generally want to run all the commands in this tutorial from that one server. Keep in mind that running many Gluster-specific commands (like gluster volume create) on one server in the cluster will execute the same command on all other servers.

Replace nodename with the hostname of the other server in the cluster, or its IP address if you don't have DNS or /etc/hosts entries. Let's say we want to connect to node02:

                  gluster peer probe node02\n

                  Notice that running gluster peer status from the second node shows that the first node has already been added.
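The output will look something like the following (the UUID will differ on your system):

# gluster peer status
Number of Peers: 1

Hostname: node01
Uuid: <UUID>
State: Peer in Cluster (Connected)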

                  "},{"location":"Install-Guide/Configure/#partition-the-disk","title":"Partition the disk","text":"

                  Assuming you have an empty disk at /dev/sdb: (You can check the partitions on your system using fdisk -l)

                  fdisk /dev/sdb\n

Then create a single partition on it using fdisk (the XFS filesystem itself is created in the next step).
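If you prefer a non-interactive approach, the same result can be sketched with parted, assuming the whole disk is dedicated to the brick (adapt to your layout):

parted -s /dev/sdb mklabel gpt
parted -s /dev/sdb mkpart primary 1MiB 100%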

                  "},{"location":"Install-Guide/Configure/#format-the-partition","title":"Format the partition","text":"
                  mkfs.xfs -i size=512 /dev/sdb1\n
                  "},{"location":"Install-Guide/Configure/#add-an-entry-to-etcfstab","title":"Add an entry to /etc/fstab","text":"
                  echo \"/dev/sdb1 /export/sdb1 xfs defaults 0 0\"  >> /etc/fstab\n
                  "},{"location":"Install-Guide/Configure/#mount-the-partition-as-a-gluster-brick","title":"Mount the partition as a Gluster \"brick\"","text":"
                  mkdir -p /export/sdb1 && mount -a\n
                  "},{"location":"Install-Guide/Configure/#set-up-a-gluster-volume","title":"Set up a Gluster volume","text":"

                  The most basic Gluster volume type is a \u201cDistribute only\u201d volume (also referred to as a \u201cpure DHT\u201d volume if you want to impress the folks at the water cooler). This type of volume simply distributes the data evenly across the available bricks in a volume. So, if I write 100 files, on average, fifty will end up on one server, and fifty will end up on another. This is faster than a \u201creplicated\u201d volume, but isn\u2019t as popular since it doesn\u2019t give you two of the most sought after features of Gluster \u2014 multiple copies of the data, and automatic failover if something goes wrong.

                  To set up a replicated volume:

gluster volume create gv0 replica 3 node01.mydomain.net:/export/sdb1/brick \
  node02.mydomain.net:/export/sdb1/brick                                   \
  node03.mydomain.net:/export/sdb1/brick

                  Breaking this down into pieces:

                  • the first part says to create a gluster volume named gv0 (the name is arbitrary, gv0 was chosen simply because it\u2019s less typing than gluster_volume_0).
                  • make the volume a replica volume
                  • keep a copy of the data on at least 3 bricks at any given time. Since we only have three bricks total, this means each server will house a copy of the data.
                  • we specify which nodes to use, and which bricks on those nodes. The order here is important when you have more bricks.
                  • the brick directory will be created by this command. If the directory already exists, you may get <brick> is already part of a volume errors.

                  It is possible (as of the most current release as of this writing, Gluster 3.3) to specify the bricks in such a way that you would make both copies of the data reside on a single node. This would make for an embarrassing explanation to your boss when your bulletproof, completely redundant, always on super cluster comes to a grinding halt when a single point of failure occurs.

                  Now, we can check to make sure things are working as expected:

                  gluster volume info\n

                  And you should see results similar to the following:

Volume Name: gv0
Type: Replicate
Volume ID: 8bc3e96b-a1b6-457d-8f7a-a91d1d4dc019
Status: Created
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: node01.yourdomain.net:/export/sdb1/brick
Brick2: node02.yourdomain.net:/export/sdb1/brick
Brick3: node03.yourdomain.net:/export/sdb1/brick

This shows us essentially what we just specified during the volume creation. The one key piece of output worth noticing is Status. A status of Created means that the volume has been created but has not yet been started, which would cause any attempt to mount the volume to fail.

                  Now, we should start the volume before we try to mount it.

                  gluster volume start gv0\n
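Once the volume is started, it can be mounted from a client. A minimal sketch (the mount point and hostname are illustrative):

mkdir -p /mnt/gv0
mount -t glusterfs node01.mydomain.net:/gv0 /mnt/gv0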
                  "},{"location":"Install-Guide/Install/","title":"Install","text":""},{"location":"Install-Guide/Install/#installing-gluster","title":"Installing Gluster","text":"

For RPM based distributions, if you will be using InfiniBand, add the glusterfs RDMA package to the installation. For RPM based systems, yum/dnf is used as the install method in order to satisfy external dependencies such as compat-readline5.

                  "},{"location":"Install-Guide/Install/#community-packages","title":"Community Packages","text":"

                  Packages are provided according to this table.

                  "},{"location":"Install-Guide/Install/#for-debian","title":"For Debian","text":"

                  Download the GPG key to apt config directory:

                  wget -O - https://download.gluster.org/pub/gluster/glusterfs/9/rsa.pub | gpg --dearmor > /etc/apt/trusted.gpg.d/gluster.gpg\n

                  If the rsa.pub is not available at the above location, please look here https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub and add the GPG key to apt:

                  wget -O - https://download.gluster.org/pub/gluster/glusterfs/7/rsa.pub | gpg --dearmor > /etc/apt/trusted.gpg.d/gluster.gpg\n

                  Add the source:

DEBID=$(grep 'VERSION_ID=' /etc/os-release | cut -d '=' -f 2 | tr -d '"')
DEBVER=$(grep 'VERSION=' /etc/os-release | grep -Eo '[a-z]+')
DEBARCH=$(dpkg --print-architecture)
echo "deb [signed-by=/etc/apt/trusted.gpg.d/gluster.gpg] https://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/${DEBID}/${DEBARCH}/apt ${DEBVER} main" > /etc/apt/sources.list.d/gluster.list

                  Update package list:

                  apt update\n

                  Install:

                  apt install glusterfs-server\n
                  "},{"location":"Install-Guide/Install/#for-ubuntu","title":"For Ubuntu","text":"

                  Install software-properties-common:

                  apt install software-properties-common\n

                  Then add the community GlusterFS PPA:

add-apt-repository ppa:gluster/glusterfs-7
apt update

                  Finally, install the packages:

                  apt install glusterfs-server\n

                  Note: Packages exist for Ubuntu 16.04 LTS, 18.04 LTS, 20.04 LTS, 20.10, 21.04

                  "},{"location":"Install-Guide/Install/#for-red-hatcentos","title":"For Red Hat/CentOS","text":"

                  RPMs for CentOS and other RHEL clones are available from the CentOS Storage SIG mirrors.

For more installation details, refer to the Gluster Quick Start guide from the CentOS Storage SIG.

                  "},{"location":"Install-Guide/Install/#for-fedora","title":"For Fedora","text":"

                  Install the Gluster packages:

                  dnf install glusterfs-server\n
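On Fedora (and other systemd-based distributions), the management daemon usually needs to be enabled and started after installation; a minimal sketch:

systemctl enable --now glusterd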

                  Once you are finished installing, you can move on to configuration section.

                  "},{"location":"Install-Guide/Install/#for-arch-linux","title":"For Arch Linux","text":"

                  Install the Gluster package:

                  pacman -S glusterfs\n
                  "},{"location":"Install-Guide/Overview/","title":"Overview","text":""},{"location":"Install-Guide/Overview/#purpose","title":"Purpose","text":"

The Install Guide (IG) is aimed at providing the sequence of steps needed for setting up Gluster. It contains a reasonable degree of detail which helps an administrator to understand the terminology, the choices, and how to configure the deployment to the storage needs of their application workload. The Quick Start Guide (QSG) is designed to get you to a deployment with default choices and is aimed at those who want to spend less time getting to a deployment.

After you deploy Gluster by following these steps, we recommend that you read the Gluster Admin Guide to learn how to administer Gluster and how to select a volume type that fits your needs. Also, be sure to enlist the help of the Gluster community via the IRC or Slack channels (see https://www.gluster.org/community/) or the Q&A section.

                  "},{"location":"Install-Guide/Overview/#overview_1","title":"Overview","text":"

                  Before we begin, let\u2019s talk about what Gluster is, address a few myths and misconceptions, and define a few terms. This will help you to avoid some of the common issues that others encounter as they start their journey with Gluster.

                  "},{"location":"Install-Guide/Overview/#what-is-gluster","title":"What is Gluster","text":"

                  Gluster is a distributed scale-out filesystem that allows rapid provisioning of additional storage based on your storage consumption needs. It incorporates automatic failover as a primary feature. All of this is accomplished without a centralized metadata server.

                  "},{"location":"Install-Guide/Overview/#what-is-gluster-without-making-me-learn-an-extra-glossary-of-terminology","title":"What is Gluster without making me learn an extra glossary of terminology?","text":"
                  • Gluster is an easy way to provision your own storage backend NAS using almost any hardware you choose.
                  • You can add as much as you want to start with, and if you need more later, adding more takes just a few steps.
                  • You can configure failover automatically, so that if a server goes down, you don\u2019t lose access to the data. No manual steps are required for failover. When you fix the server that failed and bring it back online, you don\u2019t have to do anything to get the data back except wait. In the meantime, the most current copy of your data keeps getting served from the node that was still running.
                  • You can build a clustered filesystem in a matter of minutes\u2026 it is trivially easy for basic setups
                  • It takes advantage of what we refer to as \u201ccommodity hardware\u201d, which means, we run on just about any hardware you can think of, from that stack of decomm\u2019s and gigabit switches in the corner no one can figure out what to do with (how many license servers do you really need, after all?), to that dream array you were speccing out online. Don\u2019t worry, I won\u2019t tell your boss.
                  • It takes advantage of commodity software too. No need to mess with kernels or fine tune the OS to a tee. We run on top of most unix filesystems, with XFS and ext4 being the most popular choices. We do have some recommendations for more heavily utilized arrays, but these are simple to implement and you probably have some of these configured already anyway.
                  • Gluster data can be accessed from just about anywhere \u2013 You can use traditional NFS, SMB/CIFS for Windows clients, or our own native GlusterFS (a few additional packages are needed on the client machines for this, but as you will see, they are quite small).
                  • There are even more advanced features than this, but for now we will focus on the basics.
                  • It\u2019s not just a toy. Gluster is enterprise-ready, and commercial support is available if you need it. It is used in some of the most taxing environments like media serving, natural resource exploration, medical imaging, and even as a filesystem for Big Data.
                  "},{"location":"Install-Guide/Overview/#is-gluster-going-to-work-for-me-and-what-i-need-it-to-do","title":"Is Gluster going to work for me and what I need it to do?","text":"

Most likely, yes. People use Gluster for the storage needs of a variety of application workloads. You are encouraged to ask around in our IRC or Slack channels or Q&A forums to see if anyone has tried something similar. That being said, there are a few places where Gluster is going to need more consideration than others.

• Accessing Gluster from SMB/CIFS is often going to be slow by most people's standards. If you only have moderate access by users, then it most likely won't be an issue for you. On the other hand, if you add enough Gluster servers into the mix, some people have seen better performance with us than with other solutions, due to the scale-out nature of the technology.
                  • Gluster is traditionally better when using file sizes of at least 16KB (with a sweet spot around 128KB or so).
                  "},{"location":"Install-Guide/Overview/#what-is-the-cost-and-complexity-required-to-set-up-cluster","title":"What is the cost and complexity required to set up cluster?","text":"

                  Question: How many billions of dollars is it going to cost to setup a cluster? Don\u2019t I need redundant networking, super fast SSD\u2019s, technology from Alpha Centauri delivered by men in black, etc\u2026?

                  I have never seen anyone spend even close to a billion, unless they got the rust proof coating on the servers. You don\u2019t seem like the type that would get bamboozled like that, so have no fear. For the purpose of this tutorial, if your laptop can run two VM\u2019s with 1GB of memory each, you can get started testing and the only thing you are going to pay for is coffee (assuming the coffee shop doesn\u2019t make you pay them back for the electricity to power your laptop).

                  If you want to test on bare metal, since Gluster is built with commodity hardware in mind, and because there is no centralized meta-data server, a very simple cluster can be deployed with two basic servers (2 CPU\u2019s, 4GB of RAM each, 1 Gigabit network). This is sufficient to have a nice file share or a place to put some nightly backups. Gluster is deployed successfully on all kinds of disks, from the lowliest 5200 RPM SATA to mightiest 1.21 gigawatt SSD\u2019s. The more performance you need, the more consideration you will want to put into how much hardware to buy, but the great thing about Gluster is that you can start small, and add on as your needs grow.

                  "},{"location":"Install-Guide/Overview/#ok-but-if-i-add-servers-on-later-dont-they-have-to-be-exactly-the-same","title":"OK, but if I add servers on later, don\u2019t they have to be exactly the same?","text":"

                  In a perfect world, sure. Having the hardware be the same means less troubleshooting when the fires start popping up. But plenty of people deploy Gluster on mix and match hardware, and successfully.

                  Get started by checking some Common Criteria

                  "},{"location":"Install-Guide/Setup-Bare-metal/","title":"Setup Bare Metal","text":"

                  Note: You only need one of the three setup methods!

                  "},{"location":"Install-Guide/Setup-Bare-metal/#setup-method-2-setting-up-on-physical-servers","title":"Setup, Method 2 \u2013 Setting up on physical servers","text":"

To set up Gluster on physical servers, we recommend two servers of very modest specifications (2 CPUs, 2GB of RAM, 1GBE). Since we are dealing with physical hardware here, keep in mind that what we are showing here is for testing purposes. In the end, remember that forces beyond your control (aka, your bosses' boss...) can push that "just for a quick test" environment right into production, despite your kicking and screaming against it. To prevent this, it can be a good idea to deploy your test environment as much as possible the same way you would a production environment (in case it becomes one, as mentioned above). That being said, here is a reminder of some of the best practices we mentioned before:

                  • Make sure DNS and NTP are setup, correct, and working
                  • If you have access to a backend storage network, use it! 10GBE or InfiniBand are great if you have access to them, but even a 1GBE backbone can help you get the most out of your deployment. Make sure that the interfaces you are going to use are also in DNS since we will be using the hostnames when we deploy Gluster
                  • When it comes to disks, the more the merrier. Although you could technically fake things out with a single disk, there would be performance issues as soon as you tried to do any real work on the servers

With the explosion of commodity hardware, you don't need to be a hardware expert these days to deploy a server. Although this is generally a good thing, it also means that paying attention to some important, performance-impacting BIOS settings is commonly ignored. Several points might cause issues if you're unaware of them:

                  • Most manufacturers enable power saving mode by default. This is a great idea for servers that do not have high-performance requirements. For the average storage server, the performance-impact of the power savings is not a reasonable tradeoff
• Newer motherboards and processors have lots of nifty features! Enhancements in virtualization, newer ways of doing predictive algorithms and NUMA are just a few to mention. To be safe, many manufacturers ship hardware with settings meant to work with as massive a variety of workloads and configurations as they have customers. One issue you could face: that blazing-fast 10GBE card you were so thrilled about installing would, in many cases, end up being crippled by a default 1x speed put in place on the PCI-E bus by the motherboard.

                  Thankfully, most manufacturers show all the BIOS settings, including the defaults, right in the manual. It only takes a few minutes to download, and you don\u2019t even have to power off the server unless you need to make changes. More and more boards include the functionality to make changes in the BIOS on the fly without even powering the box off. One word of caution of course, is don\u2019t go too crazy. Fretting over each tiny little detail and setting is usually not worth the time, and the more changes you make, the more you need to document and implement later. Try to find the happy balance between time spent managing the hardware (which ideally should be as close to zero after you setup initially) and the expected gains you get back from it.

                  Finally, remember that some hardware really is better than others. Without pointing fingers anywhere specifically, it is often true that onboard components are not as robust as add-ons. As a general rule, you can safely delegate the onboard hardware to things like management network for the NIC\u2019s, and for installing the OS onto a SATA drive. At least twice a year you should check the manufacturer's website for bulletins about your hardware. Critical performance issues are often resolved with a simple driver or firmware update. As often as not, these updates affect the two most critical pieces of hardware on a machine you want to use for networked storage - the RAID controller and the NIC's.

                  Once you have set up the servers and installed the OS, you are ready to move on to the install section.

                  "},{"location":"Install-Guide/Setup-aws/","title":"Setup AWS","text":"

                  Note: You only need one of the three setup methods!

                  "},{"location":"Install-Guide/Setup-aws/#setup-method-3-deploying-in-aws","title":"Setup, Method 3 \u2013 Deploying in AWS","text":"

                  Deploying in Amazon can be one of the fastest ways to get up and running with Gluster. Of course, most of what we cover here will work with other cloud platforms.

                  • Deploy at least two instances. For testing, you can use micro instances (I even go as far as using spot instances in most cases). Debates rage on what size instance to use in production, and there is really no correct answer. As with most things, the real answer is \u201cwhatever works for you\u201d, where the trade-offs between cost and performance are balanced in a continual dance of trying to make your project successful while making sure there is enough money left over in the budget for you to get that sweet new ping pong table in the break room.
                  • For cloud platforms, your data is wide open right from the start. As such, you shouldn\u2019t allow open access to all ports in your security groups if you plan to put a single piece of even the least valuable information on the test instances. By least valuable, I mean \u201cCash value of this coupon is 1/100th of 1 cent\u201d kind of least valuable. Don\u2019t be the next one to end up as a breaking news flash on the latest inconsiderate company to allow their data to fall into the hands of the baddies. See Step 2 for the minimum ports you will need open to use Gluster
                  • You can use the free \u201cephemeral\u201d storage for the Gluster bricks during testing, but make sure to use some form of protection against data loss when you move to production. Typically this means EBS backed volumes or using S3 to periodically back up your data bricks.

                  Other notes:

                  • In production, it is recommended to replicate your VM\u2019s across multiple zones. For purpose of this tutorial, it is overkill, but if anyone is interested in this please let us know since we are always looking to write articles on the most requested features and questions.
                  • Using EBS volumes and Elastic IPs are also recommended in production. For testing, you can safely ignore these as long as you are aware that the data could be lost at any moment, so make sure your test deployment is just that, testing only.
                  • Performance can fluctuate wildly in a cloud environment. If performance issues are seen, there are several possible strategies, but keep in mind that this is the perfect place to take advantage of the scale-out capability of Gluster. While it is not true in all cases that deploying more instances will necessarily result in a \u201cfaster\u201d cluster, in general, you will see that adding more nodes means more performance for the cluster overall.
                  • If a node reboots, you will typically need to do some extra work to get Gluster running again using the default EC2 configuration. If a node is shut down, it can mean absolute loss of the node (depending on how you set things up). This is well beyond the scope of this document but is discussed in any number of AWS-related forums and posts. Since I found out the hard way myself (oh, so you read the manual every time?!), I thought it worth at least mentioning.

                  Once you have both instances up, you can proceed to the install page.

                  "},{"location":"Install-Guide/Setup-virt/","title":"Setup on Virtual Machine","text":"

                  Note: You only need one of the three setup methods!

                  "},{"location":"Install-Guide/Setup-virt/#setup-method-1-setting-up-in-virtual-machines","title":"Setup, Method 1 \u2013 Setting up in virtual machines","text":"

                  As we just mentioned, to set up Gluster using virtual machines, you will need at least two virtual machines with at least 1GB of RAM each. You may be able to test with less but most users will find it too slow for their tastes. The particular virtualization product you use is a matter of choice. Common platforms include Xen, VMware ESX and Workstation, VirtualBox, and KVM. For purpose of this article, all steps assume KVM but the concepts are expected to be simple to translate to other platforms as well. The article assumes you know the particulars of how to create a virtual machine and have installed a 64 bit linux distribution already.

                  Create or clone two VM\u2019s, with the following setup on each:

                  • 2 disks using the VirtIO driver, one for the base OS and one that we will use as a Gluster \u201cbrick\u201d. You can add more later to try testing some more advanced configurations, but for now let\u2019s keep it simple.

                  Note: If you have ample space available, consider allocating all the disk space at once.

                  • 2 NIC\u2019s using VirtIO driver. The second NIC is not strictly required, but can be used to demonstrate setting up a separate network for storage and management traffic.

                  Note: Attach each NIC to a separate network.

Other notes: Make sure that if you clone the VM, Gluster has not already been installed. Gluster generates a UUID to "fingerprint" each system, so cloning a previously deployed system will result in errors later on.

                  Once these are prepared, you are ready to move on to the install section.

                  "},{"location":"Ops-Guide/Overview/","title":"Overview","text":"

                  Over the years the infrastructure and services consumed by the Gluster.org community have grown organically. There have been instances of design and planning but the growth has mostly been ad-hoc and need-based.

Central to the plan of revitalizing the Gluster.org community is the ability to provide well-maintained infrastructure services with predictable uptimes and resilience. We're migrating the existing services into the Community Cage. The implied objective is that this transition will open up ways for a loose coalition of Infrastructure Administrators to form, providing expertise and guidance to the community projects within the OSAS team.

                  A small group of Gluster.org community members was asked to assess the current utilization and propose a planned growth. The ad-hoc nature of the existing infrastructure impedes the development of a proposal based on standardized methods of extrapolation. A part of the projection is based on a combination of patterns and heuristics - problems that have been observed and how mitigation strategies have enabled the community to continue to consume the services available.

                  The guiding principle for the assessment has been the need to migrate services to \"Software-as-a-Service\" models and providers wherever applicable and deemed fit. To illustrate this specific directive - the documentation/docs aspect of Gluster.org has been continuously migrating artifacts to readthedocs.org while focusing on simple integration with the website. The website itself has been put within the Gluster.org Github.com account to enable ease of maintenance and sustainability.

                  For more details look at the full Tools List.

                  "},{"location":"Ops-Guide/Tools/","title":"Tools","text":""},{"location":"Ops-Guide/Tools/#tools-we-use","title":"Tools We Use","text":"Service/Tool Purpose Hosted At Github Code Review Github Jenkins CI, build-verification-test Temporary Racks Backups Website, Gerrit and Jenkins backup Rackspace Docs Documentation content mkdocs.org download.gluster.org Official download site of the binaries Rackspace Mailman Lists mailman Rackspace www.gluster.org Web asset Rackspace"},{"location":"Ops-Guide/Tools/#notes","title":"Notes","text":"
• download.gluster.org: Resiliency is important for availability and metrics. Since it is the official download site, access needs to be restricted as much as possible. A few developers building the community packages have access. Anyone who requires access can raise an issue at gluster/project-infrastructure with a valid reason.
                  • Mailman: Should be migrated to a separate host. Should be made more redundant (ie, more than 1 MX).
• www.gluster.org: Framework and artifacts now exist under gluster.github.com. It has various legacy installations of software (mediawiki, etc.), which are being cleaned up as we find them.
                  "},{"location":"Quick-Start-Guide/Architecture/","title":"Architecture","text":"

                  A gluster volume is a collection of servers belonging to a Trusted Storage Pool. A management daemon (glusterd) runs on each server and manages a brick process (glusterfsd) which in turn exports the underlying on disk storage (XFS filesystem). The client process mounts the volume and exposes the storage from all the bricks as a single unified storage namespace to the applications accessing it. The client and brick processes' stacks have various translators loaded in them. I/O from the application is routed to different bricks via these translators.

                  "},{"location":"Quick-Start-Guide/Architecture/#types-of-volumes","title":"Types of Volumes","text":"

                  Gluster file system supports different types of volumes based on the requirements. Some volumes are good for scaling storage size, some for improving performance and some for both.

                  1.Distributed Glusterfs Volume - This is the type of volume which is created by default if no volume type is specified. Here, files are distributed across various bricks in the volume. So file1 may be stored only in brick1 or brick2 but not on both. Hence there is no data redundancy. The purpose for such a storage volume is to easily & cheaply scale the volume size. However this also means that a brick failure will lead to complete loss of data and one must rely on the underlying hardware for data loss protection.

                  Create a Distributed Volume

                  gluster volume create NEW-VOLNAME [transport [tcp | rdma | tcp,rdma]] NEW-BRICK...\n

                  For example to create a distributed volume with four storage servers using TCP.

                  gluster volume create test-volume server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4\n
                  volume create: test-volume: success: please start the volume to access data\n

                  To display the volume info:

                  gluster volume info\n
Volume Name: test-volume
Type: Distribute
Status: Created
Number of Bricks: 4
Transport-type: tcp
Bricks:
Brick1: server1:/exp1
Brick2: server2:/exp2
Brick3: server3:/exp3
Brick4: server4:/exp4

                  2.Replicated Glusterfs Volume - In this volume we overcome the risk of data loss which is present in the distributed volume. Here exact copies of the data are maintained on all bricks. The number of replicas in the volume can be decided by client while creating the volume. So we need to have at least two bricks to create a volume with 2 replicas or a minimum of three bricks to create a volume of 3 replicas. One major advantage of such a volume is that even if one brick fails the data can still be accessed from its replicated bricks. Such a volume is used for better reliability and data redundancy.

                  Create a Replicated Volume

                  gluster volume create NEW-VOLNAME [replica COUNT] [transport [tcp |rdma | tcp,rdma]] NEW-BRICK...\n

                  For example, to create a replicated volume with three storage servers:

gluster volume create test-volume replica 3 transport tcp \
      server1:/exp1 server2:/exp2 server3:/exp3
                  volume create: test-volume: success: please start the volume to access data\n

                  3.Distributed Replicated Glusterfs Volume - In this volume files are distributed across replicated sets of bricks. The number of bricks must be a multiple of the replica count. Also the order in which we specify the bricks is important since adjacent bricks become replicas of each other. This type of volume is used when high availability of data due to redundancy and scaling storage is required. So if there were eight bricks and replica count 2 then the first two bricks become replicas of each other then the next two and so on. This volume is denoted as 4x2. Similarly if there were eight bricks and replica count 4 then four bricks become replica of each other and we denote this volume as 2x4 volume.

                  Create the distributed replicated volume:

                  gluster volume create NEW-VOLNAME [replica COUNT] [transport [tcp | rdma | tcp,rdma]] NEW-BRICK...\n

                  For example, six node distributed replicated volume with a three-way mirror:

                  gluster volume create test-volume replica 3 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6\n
                  volume create: test-volume: success: please start the volume to access data\n
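As a hedged illustration of the 4 x 2 layout described above (eight bricks with replica count 2; the hostnames and brick paths are placeholders, not part of the original example), the volume could be created as follows. Note that recent Gluster versions may warn that plain replica 2 volumes are prone to split-brain and suggest adding an arbiter.

gluster volume create test-volume replica 2 transport tcp server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6 server7:/exp7 server8:/exp8\n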

4.Dispersed Glusterfs Volume - Dispersed volumes are based on erasure codes. They stripe the encoded data of files, with some redundancy added, across multiple bricks in the volume. You can use dispersed volumes to have a configurable level of reliability with minimum space waste. The number of redundant bricks in the volume can be decided by the client while creating the volume. The redundancy count determines how many bricks can be lost without interrupting the operation of the volume.

                  Create a dispersed volume:

                  gluster volume create test-volume [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>\n

For example, a three-node dispersed volume with a redundancy level of 1 (2 + 1):

                  gluster volume create test-volume disperse 3 redundancy 1 server1:/exp1 server2:/exp2 server3:/exp3\n
                  volume create: test-volume: success: please start the volume to access data\n

5.Distributed Dispersed Glusterfs Volume - Distributed dispersed volumes are the equivalent of distributed replicated volumes, but using dispersed subvolumes instead of replicated ones. The number of bricks must be a multiple of the disperse count (the size of the first dispersed subvolume). The purpose of such a volume is to easily scale the volume size and distribute the load across various bricks.

                  Create a distributed dispersed volume:

                  gluster volume create [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport tcp | rdma | tcp,rdma] <NEW-BRICK>\n

For example, a six-node distributed dispersed volume with a redundancy level of 1, 2 x (2 + 1) = 6:

                  gluster volume create test-volume disperse 3 redundancy 1 server1:/exp1 server2:/exp2 server3:/exp3 server4:/exp4 server5:/exp5 server6:/exp6\n
                  volume create: test-volume: success: please start the volume to access data\n

                  Note:

• A dispersed volume can be created by specifying the number of bricks in a disperse set, by specifying the number of redundancy bricks, or both (see the example after this list).

• If disperse is not specified, or the <COUNT> is missing, the entire volume will be treated as a single disperse set composed of all bricks enumerated in the command line.

                  • If redundancy is not specified, it is computed automatically to be the optimal value. If this value does not exist, it's assumed to be '1' and a warning message is shown:

                    # gluster volume create test-volume disperse 4 server{1..4}:/bricks/test-volume

                    There isn't an optimal redundancy value for this configuration. Do you want to create the volume with redundancy 1 ? (y/n)

                  • In all cases where redundancy is automatically computed and it's not equal to '1', a warning message is displayed:

                    # gluster volume create test-volume disperse 6 server{1..6}:/bricks/test-volume

                    The optimal redundancy for this configuration is 2. Do you want to create the volume with this value ? (y/n)

                  • redundancy must be greater than 0, and the total number of bricks must be greater than 2 * redundancy. This means that a dispersed volume must have a minimum of 3 bricks.
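As a sketch of the note above (assuming six servers with brick paths /bricks/test-volume, which are placeholders), a dispersed volume can also be created by giving disperse-data and redundancy explicitly; here 4 data bricks plus 2 redundancy bricks form a 4 + 2 set that tolerates the loss of any two bricks:

gluster volume create test-volume disperse-data 4 redundancy 2 server{1..6}:/bricks/test-volume\n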

                  "},{"location":"Quick-Start-Guide/Architecture/#fuse","title":"FUSE","text":"

GlusterFS is a userspace filesystem. The GlusterFS developers opted for this approach in order to avoid the need to have modules in the Linux kernel.

                  As it is a userspace filesystem, to interact with kernel VFS, GlusterFS makes use of FUSE (File System in Userspace). For a long time, implementation of a userspace filesystem was considered impossible. FUSE was developed as a solution for this. FUSE is a kernel module that supports interaction between kernel VFS and non-privileged user applications and it has an API that can be accessed from userspace. Using this API, any type of filesystem can be written using almost any language you prefer as there are many bindings between FUSE and other languages.

                  Structural diagram of FUSE.

                  This shows a filesystem \"hello world\" that is compiled to create a binary \"hello\". It is executed with a filesystem mount point /tmp/fuse. Then the user issues a command ls -l on the mount point /tmp/fuse. This command reaches VFS via glibc and since the mount /tmp/fuse corresponds to a FUSE based filesystem, VFS passes it over to FUSE module. The FUSE kernel module contacts the actual filesystem binary \"hello\" after passing through glibc and FUSE library in userspace(libfuse). The result is returned by the \"hello\" through the same path and reaches the ls -l command.

                  The communication between FUSE kernel module and the FUSE library(libfuse) is via a special file descriptor which is obtained by opening /dev/fuse. This file can be opened multiple times, and the obtained file descriptor is passed to the mount syscall, to match up the descriptor with the mounted filesystem.
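Assuming a Gluster volume is already mounted on a client, one hedged way to see this FUSE plumbing in action is to check that the fuse filesystem type is registered with the kernel and to list mounts of type fuse.glusterfs (the exact mount-type string may vary slightly between versions):

grep fuse /proc/filesystems\nmount -t fuse.glusterfs\n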

                  • More about userspace filesystems
                  • FUSE reference
                  "},{"location":"Quick-Start-Guide/Architecture/#translators","title":"Translators","text":"

                  Translating \u201ctranslators\u201d:

                  • A translator converts requests from users into requests for storage.

One to one, one to many, one to zero (e.g. caching)

                  • A translator can modify requests on the way through :

convert one request type to another (during the request transfer amongst the translators); modify paths, flags, even data (e.g. encryption)

                  • Translators can intercept or block the requests. (e.g. access control)

                  • Or spawn new requests (e.g. pre-fetch)

                  How Do Translators Work?

                  • Shared Objects
                  • Dynamically loaded according to 'volfile'

dlopen/dlsym, set up pointers to parents/children, call init (constructor), and call I/O functions through fops.

• Conventions for validating/passing options, etc.
• The configuration of translators (since GlusterFS 3.1) is managed through the gluster command line interface (cli), so you don't need to know in what order to graph the translators together (see the volfile example below).
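To see an actual translator graph, you can look at the volfiles that glusterd generates on the servers. The sketch below assumes a volume named test-volume and uses a glob because the exact volfile names vary between Gluster versions:

ls /var/lib/glusterd/vols/test-volume/\ncat /var/lib/glusterd/vols/test-volume/*fuse.vol\n

Each volume ... end-volume stanza in the file corresponds to one translator, and the subvolumes lines define how they are graphed together.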
                  "},{"location":"Quick-Start-Guide/Architecture/#types-of-translators","title":"Types of Translators","text":"

List of known translator types and their functional purpose:

• Storage - Lowest level translator; stores and accesses data from the local file system.
• Debug - Provides interfaces and statistics for errors and debugging.
• Cluster - Handles distribution and replication of data as it relates to writing to and reading from bricks and nodes.
• Encryption - Extension translators for on-the-fly encryption/decryption of stored data.
• Protocol - Extension translators for client/server communication protocols.
• Performance - Tuning translators to adjust for workload and I/O profiles.
• Bindings - Add extensibility, e.g. the Python interface written by Jeff Darcy to extend API interaction with GlusterFS.
• System - System access translators, e.g. interfacing with file system access control.
• Scheduler - I/O schedulers that determine how to distribute new write operations across clustered systems.
• Features - Add additional features such as Quotas, Filters, Locks, etc.

The default/general hierarchy of translators in vol files:

All the translators hooked together to perform a function are called a graph. The left set of translators comprises the client stack and the right set comprises the server stack.

The glusterfs translators can be sub-divided into many categories, but two important categories are Cluster and Performance translators:

One of the most important translators, and the first one the data/request has to go through, is the fuse translator, which falls under the category of Mount Translators.

                  1. Cluster Translators:

                    • DHT(Distributed Hash Table)
                    • AFR(Automatic File Replication)
                  2. Performance Translators:

                    • io-cache
                    • io-threads
                    • md-cache
                    • O-B (open behind)
                    • QR (quick read)
                    • r-a (read-ahead)
                    • w-b (write-behind)

                  Other Feature Translators include:

                  • changelog
• locks - GlusterFS has a locks translator which provides the internal locking operations inodelk and entrylk, which are used by AFR to achieve synchronization of operations on files or directories that conflict with each other.
                  • marker
                  • quota

                  Debug Translators

                  • trace - To trace the error logs generated during the communication amongst the translators.
                  • io-stats
                  "},{"location":"Quick-Start-Guide/Architecture/#dhtdistributed-hash-table-translator","title":"DHT(Distributed Hash Table) Translator","text":"

                  What is DHT?

                  DHT is the real core of how GlusterFS aggregates capacity and performance across multiple servers. Its responsibility is to place each file on exactly one of its subvolumes \u2013 unlike either replication (which places copies on all of its subvolumes) or striping (which places pieces onto all of its subvolumes). It\u2019s a routing function, not splitting or copying.

                  How DHT works?

The basic method used in DHT is consistent hashing. Each subvolume (brick) is assigned a range within a 32-bit hash space, covering the entire range with no holes or overlaps. Then each file is also assigned a value in that same space, by hashing its name. Exactly one brick will have an assigned range including the file\u2019s hash value, and so the file \u201cshould\u201d be on that brick. However, there are many cases where that won\u2019t be the case, such as when the set of bricks (and therefore the assignment of ranges) has changed since the file was created, or when a brick is nearly full. Much of the complexity in DHT involves these special cases, which we\u2019ll discuss in a moment.

When you open() a file, the distribute translator is given one piece of information to find your file: the file name. To determine where that file is, the translator runs the file name through a hashing algorithm in order to turn that file name into a number.
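The hash ranges themselves can be inspected on the bricks: DHT stores each directory's layout in the trusted.glusterfs.dht extended attribute. The following is a hedged example; the brick path /data/brick1/gv0/dir is a placeholder, and the command must be run against the brick directory, not the client mount:

getfattr -n trusted.glusterfs.dht -e hex /data/brick1/gv0/dir\n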

                  A few Observations of DHT hash-values assignment:

                  1. The assignment of hash ranges to bricks is determined by extended attributes stored on directories, hence distribution is directory-specific.
                  2. Consistent hashing is usually thought of as hashing around a circle, but in GlusterFS it\u2019s more linear. There\u2019s no need to \u201cwrap around\u201d at zero, because there\u2019s always a break (between one brick\u2019s range and another\u2019s) at zero.
                  3. If a brick is missing, there will be a hole in the hash space. Even worse, if hash ranges are reassigned while a brick is offline, some of the new ranges might overlap with the (now out of date) range stored on that brick, creating a bit of confusion about where files should be.
                  "},{"location":"Quick-Start-Guide/Architecture/#afrautomatic-file-replication-translator","title":"AFR(Automatic File Replication) Translator","text":"

The Automatic File Replication (AFR) translator in GlusterFS makes use of extended attributes to keep track of the file operations. It is responsible for replicating the data across the bricks.

                  "},{"location":"Quick-Start-Guide/Architecture/#responsibilities-of-afr","title":"Responsibilities of AFR","text":"

                  Its responsibilities include the following:

1. Maintain replication consistency (i.e. the data on all the bricks should be the same, even when operations are happening on the same file/directory in parallel from multiple applications/mount points, as long as all the bricks in the replica set are up).
                  2. Provide a way of recovering data in case of failures as long as there is at least one brick which has the correct data.
                  3. Serve fresh data for read/stat/readdir etc.
                  "},{"location":"Quick-Start-Guide/Architecture/#geo-replication","title":"Geo-Replication","text":"

                  Geo-replication provides asynchronous replication of data across geographically distinct locations and was introduced in Glusterfs 3.2. It mainly works across WAN and is used to replicate the entire volume unlike AFR which is intra-cluster replication. This is mainly useful for backup of entire data for disaster recovery.

                  Geo-replication uses a primary-secondary model, whereby replication occurs between a Primary and a Secondary, both of which should be GlusterFS volumes. Geo-replication provides an incremental replication service over Local Area Networks (LANs), Wide Area Network (WANs), and across the Internet.

                  Geo-replication over LAN

                  You can configure Geo-replication to mirror data over a Local Area Network.

                  Geo-replication over WAN

                  You can configure Geo-replication to replicate data over a Wide Area Network.

                  Geo-replication over Internet

                  You can configure Geo-replication to mirror data over the Internet.

                  Multi-site cascading Geo-replication

                  You can configure Geo-replication to mirror data in a cascading fashion across multiple sites.

                  There are mainly two aspects while asynchronously replicating data:

1.Change detection - This captures the details of file operations necessary for replication. There are two methods to sync the detected changes:

i. Changelogs - Changelog is a translator which records the necessary details for the fops that occur. The changes can be written in binary format or ASCII. There are three categories, with each category represented by a specific changelog format. All three categories are recorded in a single changelog file.

                  Entry - create(), mkdir(), mknod(), symlink(), link(), rename(), unlink(), rmdir()

                  Data - write(), writev(), truncate(), ftruncate()

                  Meta - setattr(), fsetattr(), setxattr(), fsetxattr(), removexattr(), fremovexattr()

In order to record the type of operation and the entity that underwent it, a type identifier is used. Normally, the entity on which the operation is performed would be identified by the pathname, but we choose to use the GlusterFS internal file identifier (GFID) instead (as GlusterFS supports a GFID-based backend, the pathname field may not always be valid, and for other reasons which are out of scope of this document). Therefore, the format of the record for the three types of operation can be summarized as follows:

                  Entry - GFID + FOP + MODE + UID + GID + PARGFID/BNAME [PARGFID/BNAME]

                  Meta - GFID of the file

                  Data - GFID of the file

                  GFID's are analogous to inodes. Data and Meta fops record the GFID of the entity on which the operation was performed, thereby recording that there was a data/metadata change on the inode. Entry fops record at the minimum a set of six or seven records (depending on the type of operation), that is sufficient to identify what type of operation the entity underwent. Normally this record includes the GFID of the entity, the type of file operation (which is an integer [an enumerated value which is used in Glusterfs]) and the parent GFID and the basename (analogous to parent inode and basename).

The changelog file is rolled over after a specific time interval. We then perform processing operations on the file, such as converting it to an understandable/human-readable format and keeping a private copy of the changelog. The library then consumes these logs and serves application requests.
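As a hedged example (the volume name and brick path are placeholders), changelog recording can be switched on per volume, and the rolled-over changelog files can then be found under the brick's .glusterfs directory:

gluster volume set test-volume changelog.changelog on\nls /data/brick1/gv0/.glusterfs/changelogs/\n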

                  ii. Xsync - Marker translator maintains an extended attribute \u201cxtime\u201d for each file and directory. Whenever any update happens it would update the xtime attribute of that file and all its ancestors. So the change is propagated from the node (where the change has occurred) all the way to the root.

Consider the above directory tree structure. At time T1 the primary and secondary were in sync with each other.

At time T2 a new file File2 was created. This will trigger the xtime marking (where xtime is the current timestamp) from File2 up to the root, i.e. the xtime of File2, Dir3, Dir1 and finally Dir0 will all be updated.

                  Geo-replication daemon crawls the file system based on the condition that xtime(primary) > xtime(secondary). Hence in our example it would crawl only the left part of the directory structure since the right part of the directory structure still has equal timestamp. Although the crawling algorithm is fast we still need to crawl a good part of the directory structure.

2.Replication - We use rsync for data replication. Rsync is an external utility which calculates the difference between the two files and sends this difference from the source to the sink.

                  "},{"location":"Quick-Start-Guide/Architecture/#overall-working-of-glusterfs","title":"Overall working of GlusterFS","text":"

As soon as GlusterFS is installed in a server node, a gluster management daemon (glusterd) binary will be created. This daemon should be running in all participating nodes in the cluster. After starting glusterd, a trusted server pool (TSP) can be created consisting of all storage server nodes (a TSP can contain even a single node). Now bricks, which are the basic units of storage, can be created as export directories in these servers. Any number of bricks from this TSP can be clubbed together to form a volume.

Once a volume is created, a glusterfsd process starts running on each of the participating bricks. Along with this, configuration files known as vol files will be generated inside /var/lib/glusterd/vols/. There will be configuration files corresponding to each brick in the volume, containing all the details about that particular brick. A configuration file required by a client process will also be created. Now our filesystem is ready to use. We can mount this volume on a client machine very easily as follows and use it like we use local storage:

mount.glusterfs <IP or hostname>:<volume_name> <mount_point>

                  IP or hostname can be that of any node in the trusted server pool in which the required volume is created.
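For example, assuming a volume named test-volume and a client mount point /mnt/gluster (both placeholders), the mount can also be made persistent with an fstab entry; the _netdev option is a common way to delay the mount until networking is up:

mount.glusterfs server1:/test-volume /mnt/gluster\necho 'server1:/test-volume /mnt/gluster glusterfs defaults,_netdev 0 0' >> /etc/fstab\n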

When we mount the volume on the client, the client glusterfs process communicates with the servers\u2019 glusterd process. The server glusterd process sends a configuration file (vol file) containing the list of client translators and another containing the information of each brick in the volume, with the help of which the client glusterfs process can now directly communicate with each brick\u2019s glusterfsd process. The setup is now complete and the volume is ready to serve clients.

When a system call (file operation, or fop) is issued by the client in the mounted filesystem, the VFS (identifying the type of filesystem to be glusterfs) will send the request to the FUSE kernel module. The FUSE kernel module will in turn send it to the GlusterFS process in the userspace of the client node via /dev/fuse (this has been described in the FUSE section). The GlusterFS process on the client consists of a stack of translators called the client translators, which are defined in the configuration file (vol file) sent by the storage server's glusterd process. The first among these translators is the FUSE translator, which consists of the FUSE library (libfuse). Each translator has functions corresponding to each file operation (fop) supported by glusterfs. The request will hit the corresponding function in each of the translators. The main client translators include:

                  • FUSE translator
                  • DHT translator- DHT translator maps the request to the correct brick that contains the file or directory required.
                  • AFR translator- It receives the request from the previous translator and if the volume type is replicate, it duplicates the request and passes it on to the Protocol client translators of the replicas.
                  • Protocol Client translator- Protocol Client translator is the last in the client translator stack. This translator is divided into multiple threads, one for each brick in the volume. This will directly communicate with the glusterfsd of each brick.

                  In the storage server node that contains the brick in need, the request again goes through a series of translators known as server translators, main ones being:

                  • Protocol server translator
                  • POSIX translator

                  The request will finally reach VFS and then will communicate with the underlying native filesystem. The response will retrace the same path.

                  "},{"location":"Quick-Start-Guide/Quickstart/","title":"Quick Start Guide","text":""},{"location":"Quick-Start-Guide/Quickstart/#installing-glusterfs-a-quick-start-guide","title":"Installing GlusterFS - a Quick Start Guide","text":""},{"location":"Quick-Start-Guide/Quickstart/#purpose-of-this-document","title":"Purpose of this document","text":"

This document is intended to provide a step-by-step guide to setting up GlusterFS for the first time with a minimum degree of complexity. For the purposes of this guide, it is required to use Fedora 30 (or higher, see https://fedoraproject.org/wiki/End_of_life) virtual machine instances.

After you deploy GlusterFS by following these steps, we recommend that you read the GlusterFS Admin Guide to learn how to select a volume type that fits your needs and how to administer GlusterFS. The GlusterFS Install Guide provides a more detailed explanation of the steps we show in this Quick Start Guide.

                  If you would like a more detailed walkthrough with instructions for installing using different methods (in local virtual machines, EC2 and baremetal) and different distributions, then have a look at the Install guide.

                  "},{"location":"Quick-Start-Guide/Quickstart/#using-ansible-to-deploy-and-manage-glusterfs","title":"Using Ansible to deploy and manage GlusterFS","text":"

If you are already an Ansible user, and are more comfortable with setting up distributed systems with Ansible, we recommend you skip these steps and move over to the gluster-ansible repository, which provides most of the details needed to get the systems running quickly.

                  "},{"location":"Quick-Start-Guide/Quickstart/#automatically-deploying-glusterfs-with-puppet-glustervagrant","title":"Automatically deploying GlusterFS with Puppet-Gluster+Vagrant","text":"

                  To deploy GlusterFS using scripted methods, please read this article.

                  "},{"location":"Quick-Start-Guide/Quickstart/#step-1-have-at-least-three-nodes","title":"Step 1 \u2013 Have at least three nodes","text":"
                  • Fedora 30 (or later) on 3 nodes named \"server1\", \"server2\" and \"server3\"
                  • A working network connection
                  • At least two virtual disks, one for the OS installation, and one to be used to serve GlusterFS storage (sdb), on each of these VMs. This will emulate a real-world deployment, where you would want to separate GlusterFS storage from the OS install.
• Set up NTP on each of these servers to ensure the proper functioning of many applications on top of the filesystem. This is an important requirement (a hedged example follows this list).
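One hedged way to satisfy the time-synchronization requirement on Fedora-based nodes is to use chrony (assuming it is not already installed and enabled):

dnf install -y chrony\nsystemctl enable --now chronyd\n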

                  Note: GlusterFS stores its dynamically generated configuration files at /var/lib/glusterd. If at any point in time GlusterFS is unable to write to these files (for example, when the backing filesystem is full), it will at minimum cause erratic behavior for your system; or worse, take your system offline completely. It is recommended to create separate partitions for directories such as /var/log to reduce the chances of this happening.

                  "},{"location":"Quick-Start-Guide/Quickstart/#step-2-format-and-mount-the-bricks","title":"Step 2 - Format and mount the bricks","text":"

                  Perform this step on all the nodes, \"server{1,2,3}\"

Note: We are going to use the XFS filesystem for the backend bricks. But Gluster is designed to work on top of any filesystem that supports extended attributes.

                  The following examples assume that the brick will be residing on /dev/sdb1.

                  mkfs.xfs -i size=512 /dev/sdb1\nmkdir -p /data/brick1\necho '/dev/sdb1 /data/brick1 xfs defaults 1 2' >> /etc/fstab\nmount -a && mount\n

                  You should now see sdb1 mounted at /data/brick1

                  "},{"location":"Quick-Start-Guide/Quickstart/#step-3-installing-glusterfs","title":"Step 3 - Installing GlusterFS","text":"

                  Install the software

                  yum install glusterfs-server\n

                  Start the GlusterFS management daemon:

                  service glusterd start\n

                  Check the status of the daemon:

                  service glusterd status\n

                  You should see something like this:

                  glusterd.service - LSB: glusterfs server\n       Loaded: loaded (/etc/rc.d/init.d/glusterd)\n   Active: active (running) since Mon, 13 Aug 2012 13:02:11 -0700; 2s ago\n   Process: 19254 ExecStart=/etc/rc.d/init.d/glusterd start (code=exited, status=0/SUCCESS)\n   CGroup: name=systemd:/system/glusterd.service\n       \u251c 19260 /usr/sbin/glusterd -p /run/glusterd.pid\n       \u251c 19304 /usr/sbin/glusterfsd --xlator-option georep-server.listen-port=24009 -s localhost...\n       \u2514 19309 /usr/sbin/glusterfs -f /var/lib/glusterd/nfs/nfs-server.vol -p /var/lib/glusterd/...\n
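On current Fedora releases, which use systemd, the equivalent commands are typically the following (the status output will look different from the LSB-style sample above):

systemctl enable --now glusterd\nsystemctl status glusterd\n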
                  "},{"location":"Quick-Start-Guide/Quickstart/#step-4-configure-the-firewall","title":"Step 4 - Configure the firewall","text":"

The gluster processes on the nodes need to be able to communicate with each other. To simplify this setup, configure the firewall on each node to accept all traffic from the other nodes.

                  iptables -I INPUT -p all -s <ip-address> -j ACCEPT\n

where ip-address is the address of another node; repeat this for each of the other nodes in the pool.
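If the nodes run firewalld instead of plain iptables, a hedged alternative is to use the glusterfs service definition, if your firewalld version ships one; otherwise open the required ports manually:

firewall-cmd --permanent --add-service=glusterfs\nfirewall-cmd --reload\n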

                  "},{"location":"Quick-Start-Guide/Quickstart/#step-5-configure-the-trusted-pool","title":"Step 5 - Configure the trusted pool","text":"

                  From \"server1\"

                  gluster peer probe server2\ngluster peer probe server3\n

Note: When using hostnames, the first server, i.e. server1, needs to be probed from one other server to set its hostname. The reason is that when the other server, i.e. server2, is probed from server1, the hosts may be configured in a way that only the IP address of server1 is transmitted on probing. So, in order to use hostnames in the cluster, it is advised to probe server1 back from server2, server3, or up to the nth server, based on the cluster size.

                  From \"server2\"

                  gluster peer probe server1\n

                  Note: Once this pool has been established, only trusted members may probe new servers into the pool. A new server cannot probe the pool, it must be probed from the pool.

                  Check the peer status on server1

                  gluster peer status\n

                  You should see something like this (the UUID will differ)

                  Number of Peers: 2\n\nHostname: server2\nUuid: f0e7b138-4874-4bc0-ab91-54f20c7068b4\nState: Peer in Cluster (Connected)\n\nHostname: server3\nUuid: f0e7b138-4532-4bc0-ab91-54f20c701241\nState: Peer in Cluster (Connected)\n
                  "},{"location":"Quick-Start-Guide/Quickstart/#step-6-set-up-a-glusterfs-volume","title":"Step 6 - Set up a GlusterFS volume","text":"

                  On all servers:

                  mkdir -p /data/brick1/gv0\n

                  From any single server:

                  gluster volume create gv0 replica 3 server1:/data/brick1/gv0 server2:/data/brick1/gv0 server3:/data/brick1/gv0\n

                  On successful operation, you should see something like:

                  volume create: gv0: success: please start the volume to access data\n

                  Then start the newly created volume:

                  gluster volume start gv0\n

                  You should see something like:

                  volume start: gv0: success\n

                  Confirm that the volume shows \"Started\":

                  gluster volume info\n

                  You should see something like this (the Volume ID will differ):

                  Volume Name: gv0\nType: Replicate\nVolume ID: f25cc3d8-631f-41bd-96e1-3e22a4c6f71f\nStatus: Started\nSnapshot Count: 0\nNumber of Bricks: 1 x 3 = 3\nTransport-type: tcp\nBricks:\nBrick1: server1:/data/brick1/gv0\nBrick2: server2:/data/brick1/gv0\nBrick3: server3:/data/brick1/gv0\nOptions Reconfigured:\ntransport.address-family: inet\n

Note: If the volume does not show \"Started\", the files under /var/log/glusterfs/glusterd.log should be checked in order to debug and diagnose the situation. These logs can be looked at on one or all of the configured servers.

                  "},{"location":"Quick-Start-Guide/Quickstart/#step-7-testing-the-glusterfs-volume","title":"Step 7 - Testing the GlusterFS volume","text":"

For this step, we will use one of the servers to mount the volume. Typically, you would do this from an external machine, known as a \"client\". Since using this method would require additional packages to be installed on the client machine, we will use one of the servers as a simple place to test first, as if it were that \"client\".

                  mkdir /mnt/gluster-test\nmount -t glusterfs server1:/gv0 /mnt/gluster-test\nfor i in `seq -w 1 100`; do cp -rp /var/log/messages /mnt/gluster-test/copy-test-$i; done\n

                  First, check the client mount point:

                  ls -lA /mnt/gluster-test/copy* | wc -l\n

                  You should see 100 files returned. Next, check the GlusterFS brick mount points on each server:

                  ls -lA /data/brick1/gv0/copy*\n

You should see 100 files on each server using the method we listed here. Without replication, in a distribute-only volume (not detailed here), you should see about 33 files on each one.

                  "},{"location":"Troubleshooting/","title":"Index","text":""},{"location":"Troubleshooting/#troubleshooting-guide","title":"Troubleshooting Guide","text":"

This guide describes some commonly seen issues and the steps to recover from them. If that doesn\u2019t help, reach out to the Gluster community; this guide also describes what information needs to be provided in order to debug the issue. At minimum, we need the version of gluster running and the output of gluster volume info.
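For example, the version and volume layout can be collected as follows (run these on one of the servers):

gluster --version\ngluster volume info\n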

                  "},{"location":"Troubleshooting/#where-do-i-start","title":"Where Do I Start?","text":"

                  Is the issue already listed in the component specific troubleshooting sections?

                  • CLI and Glusterd Issues
                  • Heal related issues
                  • Resolving Split brains
                  • Geo-replication Issues
                  • Gluster NFS Issues
                  • File Locks

                  If that didn't help, here is how to debug further.

                  Identifying the problem and getting the necessary information to diagnose it is the first step in troubleshooting your Gluster setup. As Gluster operations involve interactions between multiple processes, this can involve multiple steps.

                  "},{"location":"Troubleshooting/#what-happened","title":"What Happened?","text":"
                  • An operation failed
                  • High Memory Usage
                  • A Gluster process crashed
                  "},{"location":"Troubleshooting/gfid-to-path/","title":"Convert GFID to Path","text":"

The GlusterFS internal file identifier (GFID) is a UUID that is unique to each file across the entire cluster. This is analogous to the inode number in a normal filesystem. The GFID of a file is stored in its xattr named trusted.gfid.
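As a quick illustration (the brick path is a placeholder), the GFID can be read directly from a file on a brick:

getfattr -n trusted.gfid -e hex /data/brick1/gv0/dir/file\n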

                  "},{"location":"Troubleshooting/gfid-to-path/#special-mount-using-gfid-access-translator","title":"Special mount using gfid-access translator:","text":"
                  mount -t glusterfs -o aux-gfid-mount vm1:test /mnt/testvol\n

Assuming you have the GFID of a file from the changelog (or somewhere else), you can try this out by getting the GFID of a file from the mountpoint:

                  getfattr -n glusterfs.gfid.string /mnt/testvol/dir/file\n
                  "},{"location":"Troubleshooting/gfid-to-path/#get-file-path-from-gfid-method-1","title":"Get file path from GFID (Method 1):","text":"

                  (Lists hardlinks delimited by :, returns path as seen from mountpoint)

                  "},{"location":"Troubleshooting/gfid-to-path/#turn-on-build-pgfid-option","title":"Turn on build-pgfid option","text":"
                  gluster volume set test build-pgfid on\n

Read the virtual xattr glusterfs.ancestry.path, which contains the file path:

                  getfattr -n glusterfs.ancestry.path -e text /mnt/testvol/.gfid/<GFID>\n

                  Example:

                  [root@vm1 glusterfs]# ls -il /mnt/testvol/dir/\ntotal 1\n10610563327990022372 -rw-r--r--. 2 root root 3 Jul 17 18:05 file\n10610563327990022372 -rw-r--r--. 2 root root 3 Jul 17 18:05 file3\n\n[root@vm1 glusterfs]# getfattr -n glusterfs.gfid.string /mnt/testvol/dir/file\ngetfattr: Removing leading '/' from absolute path names\n# file: mnt/testvol/dir/file\nglusterfs.gfid.string=\"11118443-1894-4273-9340-4b212fa1c0e4\"\n\n[root@vm1 glusterfs]# getfattr -n glusterfs.ancestry.path -e text /mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4\ngetfattr: Removing leading '/' from absolute path names\n# file: mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4\nglusterfs.ancestry.path=\"/dir/file:/dir/file3\"\n
                  "},{"location":"Troubleshooting/gfid-to-path/#get-file-path-from-gfid-method-2","title":"Get file path from GFID (Method 2):","text":"

                  (Does not list all hardlinks, returns backend brick path)

                  getfattr -n trusted.glusterfs.pathinfo -e text /mnt/testvol/.gfid/<GFID>\n

                  Example:

                  [root@vm1 glusterfs]# getfattr -n trusted.glusterfs.pathinfo -e text /mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4\ngetfattr: Removing leading '/' from absolute path names\n# file: mnt/testvol/.gfid/11118443-1894-4273-9340-4b212fa1c0e4\ntrusted.glusterfs.pathinfo=\"(<DISTRIBUTE:test-dht> <POSIX(/mnt/brick-test/b):vm1:/mnt/brick-test/b/dir//file3>)\"\n
                  "},{"location":"Troubleshooting/gfid-to-path/#references-and-links","title":"References and links:","text":"

                  posix: placeholders for GFID to path conversion

                  "},{"location":"Troubleshooting/gluster-crash/","title":"Debugging a Crash","text":"

                  To find out why a Gluster process terminated abruptly, we need the following:

                  • A coredump of the process that crashed
                  • The exact version of Gluster that is running
                  • The Gluster log files
• The output of gluster volume info
                  • Steps to reproduce the crash if available

                  Contact the community with this information or open an issue

                  "},{"location":"Troubleshooting/resolving-splitbrain/","title":"Heal info and split-brain resolution","text":"

                  This document explains the heal info command available in gluster for monitoring pending heals in replicate volumes and the methods available to resolve split-brains.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#types-of-split-brains","title":"Types of Split-Brains:","text":"

                  A file is said to be in split-brain when Gluster AFR cannot determine which copy in the replica is the correct one.

                  There are three types of split-brains:

                  • Data split-brain: The data in the file differs on the bricks in the replica set
                  • Metadata split-brain: The metadata differs on the bricks
• Entry split-brain: The GFID of the file, or the type of the file, differs on the bricks in the replica. A type mismatch cannot be healed using any of the split-brain resolution methods, while GFID split-brains can be.
                  "},{"location":"Troubleshooting/resolving-splitbrain/#1-volume-heal-info","title":"1) Volume heal info:","text":"

                  Usage: gluster volume heal <VOLNAME> info

                  This lists all the files that require healing (and will be processed by the self-heal daemon). It prints either their path or their GFID.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#interpreting-the-output","title":"Interpreting the output","text":"

                  All the files listed in the output of this command need to be healed. The files listed may also be accompanied by the following tags:

                  a) 'Is in split-brain' A file in data or metadata split-brain will be listed with \" - Is in split-brain\" appended after its path/GFID. E.g. \"/file4\" in the output provided below. However, for a file in GFID split-brain, the parent directory of the file is shown to be in split-brain and the file itself is shown to be needing healing, e.g. \"/dir\" in the output provided below is in split-brain because of GFID split-brain of file \"/dir/a\". Files in split-brain cannot be healed without resolving the split-brain.

b) 'Is possibly undergoing heal' When the heal info command is run, it (or to be more specific, the 'glfsheal' binary that is executed when you run the command) takes locks on each file to find out if it needs healing. However, if the self-heal daemon had already started healing the file, it would have taken locks which glfsheal wouldn't be able to acquire. In such a case, it could print this message. Another possible case could be multiple glfsheal processes running simultaneously (e.g. multiple users ran a heal info command at the same time) and competing for the same lock.

                  The following is an example of heal info command's output.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example","title":"Example","text":"

                  Consider a replica volume \"test\" with two bricks b1 and b2; self-heal daemon off, mounted at /mnt.

                  # gluster volume heal test info\nBrick \\<hostname:brickpath-b1>\n<gfid:aaca219f-0e25-4576-8689-3bfd93ca70c2> - Is in split-brain\n<gfid:39f301ae-4038-48c2-a889-7dac143e82dd> - Is in split-brain\n<gfid:c3c94de2-232d-4083-b534-5da17fc476ac> - Is in split-brain\n<gfid:6dc78b20-7eb6-49a3-8edb-087b90142246>\n\nNumber of entries: 4\n\nBrick <hostname:brickpath-b2>\n/dir/file2\n/dir/file1 - Is in split-brain\n/dir - Is in split-brain\n/dir/file3\n/file4 - Is in split-brain\n/dir/a\n\n\nNumber of entries: 6\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#analysis-of-the-output","title":"Analysis of the output","text":"

                  It can be seen that

                  A) from brick b1, four entries need healing:

                  • file with gfid:6dc78b20-7eb6-49a3-8edb-087b90142246 needs healing
                  • \"aaca219f-0e25-4576-8689-3bfd93ca70c2\", \"39f301ae-4038-48c2-a889-7dac143e82dd\" and \"c3c94de2-232d-4083-b534-5da17fc476ac\" are in split-brain

B) from brick b2, six entries need healing:

                  • \"a\", \"file2\" and \"file3\" need healing
                  • \"file1\", \"file4\" & \"/dir\" are in split-brain
                  "},{"location":"Troubleshooting/resolving-splitbrain/#2-volume-heal-info-split-brain","title":"2. Volume heal info split-brain","text":"

Usage: gluster volume heal <VOLNAME> info split-brain. This command only shows the list of files that are in split-brain. The output is therefore a subset of the output of gluster volume heal <VOLNAME> info.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_1","title":"Example","text":"
                  # gluster volume heal test info split-brain\nBrick <hostname:brickpath-b1>\n<gfid:aaca219f-0e25-4576-8689-3bfd93ca70c2>\n<gfid:39f301ae-4038-48c2-a889-7dac143e82dd>\n<gfid:c3c94de2-232d-4083-b534-5da17fc476ac>\nNumber of entries in split-brain: 3\n\nBrick <hostname:brickpath-b2>\n/dir/file1\n/dir\n/file4\nNumber of entries in split-brain: 3\n

                  Note that similar to the heal info command, for GFID split-brains (same filename but different GFID) their parent directories are listed to be in split-brain.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#3-resolution-of-split-brain-using-gluster-cli","title":"3. Resolution of split-brain using gluster CLI","text":"

Once the files in split-brain are identified, their resolution can be done from the gluster command line using various policies. A type mismatch cannot be healed using these methods. Split-brain resolution commands let the user resolve data, metadata, and GFID split-brains.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#31-resolution-of-datametadata-split-brain-using-gluster-cli","title":"3.1 Resolution of data/metadata split-brain using gluster CLI","text":"

                  Data and metadata split-brains can be resolved using the following policies:

                  "},{"location":"Troubleshooting/resolving-splitbrain/#i-select-the-bigger-file-as-source","title":"i) Select the bigger-file as source","text":"

This command is useful for per-file healing where it is known/decided that the file with the bigger size is to be considered the source: gluster volume heal <VOLNAME> split-brain bigger-file <FILE>. Here, <FILE> can be either the full file name as seen from the root of the volume or the GFID-string representation of the file, which sometimes gets displayed in the heal info command's output. Once this command is executed, the replica containing the <FILE> with the bigger size is found and healing is completed with that brick as the source.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_2","title":"Example :","text":"

                  Consider the earlier output of the heal info split-brain command.

                  Before healing the file, notice file size and md5 checksums :

                  On brick b1:

                  [brick1]# stat b1/dir/file1\n  File: \u2018b1/dir/file1\u2019\n  Size: 17              Blocks: 16         IO Block: 4096   regular file\nDevice: fd03h/64771d    Inode: 919362      Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-03-06 13:55:40.149897333 +0530\nModify: 2015-03-06 13:55:37.206880347 +0530\nChange: 2015-03-06 13:55:37.206880347 +0530\n Birth: -\n[brick1]#\n[brick1]# md5sum b1/dir/file1\n040751929ceabf77c3c0b3b662f341a8  b1/dir/file1\n

                  On brick b2:

                  [brick2]# stat b2/dir/file1\n  File: \u2018b2/dir/file1\u2019\n  Size: 13              Blocks: 16         IO Block: 4096   regular file\nDevice: fd03h/64771d    Inode: 919365      Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-03-06 13:54:22.974451898 +0530\nModify: 2015-03-06 13:52:22.910758923 +0530\nChange: 2015-03-06 13:52:22.910758923 +0530\n Birth: -\n[brick2]#\n[brick2]# md5sum b2/dir/file1\ncb11635a45d45668a403145059c2a0d5  b2/dir/file1\n

Healing file1 using the above command: gluster volume heal test split-brain bigger-file /dir/file1. The command reports: Healed /dir/file1.

                  After healing is complete, the md5sum and file size on both bricks should be the same.

                  On brick b1:

                  [brick1]# stat b1/dir/file1\n  File: \u2018b1/dir/file1\u2019\n  Size: 17              Blocks: 16         IO Block: 4096   regular file\nDevice: fd03h/64771d    Inode: 919362      Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-03-06 14:17:27.752429505 +0530\nModify: 2015-03-06 13:55:37.206880347 +0530\nChange: 2015-03-06 14:17:12.880343950 +0530\n Birth: -\n[brick1]#\n[brick1]# md5sum b1/dir/file1\n040751929ceabf77c3c0b3b662f341a8  b1/dir/file1\n

                  On brick b2:

                  [brick2]# stat b2/dir/file1\n  File: \u2018b2/dir/file1\u2019\n  Size: 17              Blocks: 16         IO Block: 4096   regular file\nDevice: fd03h/64771d    Inode: 919365      Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-03-06 14:17:23.249403600 +0530\nModify: 2015-03-06 13:55:37.206880000 +0530\nChange: 2015-03-06 14:17:12.881343955 +0530\n Birth: -\n[brick2]#\n[brick2]# md5sum b2/dir/file1\n040751929ceabf77c3c0b3b662f341a8  b2/dir/file1\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#ii-select-the-file-with-the-latest-mtime-as-source","title":"ii) Select the file with the latest mtime as source","text":"
                  gluster volume heal <VOLNAME> split-brain latest-mtime <FILE>\n

                  As is perhaps self-explanatory, this command uses the brick having the latest modification time for <FILE> as the source for healing.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#iii-select-one-of-the-bricks-in-the-replica-as-the-source-for-a-particular-file","title":"iii) Select one of the bricks in the replica as the source for a particular file","text":"
                  gluster volume heal <VOLNAME> split-brain source-brick <HOSTNAME:BRICKNAME> <FILE>\n

                  Here, <HOSTNAME:BRICKNAME> is selected as source brick and <FILE> present in the source brick is taken as the source for healing.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_3","title":"Example :","text":"

                  Notice the md5 checksums and file size before and after healing.

                  Before heal :

                  On brick b1:

                  [brick1]# stat b1/file4\n  File: \u2018b1/file4\u2019\n  Size: 4               Blocks: 16         IO Block: 4096   regular file\nDevice: fd03h/64771d    Inode: 919356      Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-03-06 13:53:19.417085062 +0530\nModify: 2015-03-06 13:53:19.426085114 +0530\nChange: 2015-03-06 13:53:19.426085114 +0530\n Birth: -\n[brick1]#\n[brick1]# md5sum b1/file4\nb6273b589df2dfdbd8fe35b1011e3183  b1/file4\n

                  On brick b2:

                  [brick2]# stat b2/file4\n  File: \u2018b2/file4\u2019\n  Size: 4               Blocks: 16         IO Block: 4096   regular file\nDevice: fd03h/64771d    Inode: 919358      Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-03-06 13:52:35.761833096 +0530\nModify: 2015-03-06 13:52:35.769833142 +0530\nChange: 2015-03-06 13:52:35.769833142 +0530\n Birth: -\n[brick2]#\n[brick2]# md5sum b2/file4\n0bee89b07a248e27c83fc3d5951213c1  b2/file4\n

                  Healing the file with gfid c3c94de2-232d-4083-b534-5da17fc476ac using the above command :

                  gluster volume heal test split-brain source-brick test-host:/test/b1 gfid:c3c94de2-232d-4083-b534-5da17fc476ac\n

                  Healed gfid:c3c94de2-232d-4083-b534-5da17fc476ac.

                  After healing :

                  On brick b1:

                  # stat b1/file4\n  File: \u2018b1/file4\u2019\n  Size: 4               Blocks: 16         IO Block: 4096   regular file\nDevice: fd03h/64771d    Inode: 919356      Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-03-06 14:23:38.944609863 +0530\nModify: 2015-03-06 13:53:19.426085114 +0530\nChange: 2015-03-06 14:27:15.058927962 +0530\n Birth: -\n# md5sum b1/file4\nb6273b589df2dfdbd8fe35b1011e3183  b1/file4\n

                  On brick b2:

                  # stat b2/file4\n File: \u2018b2/file4\u2019\n  Size: 4               Blocks: 16         IO Block: 4096   regular file\nDevice: fd03h/64771d    Inode: 919358      Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nAccess: 2015-03-06 14:23:38.944609000 +0530\nModify: 2015-03-06 13:53:19.426085000 +0530\nChange: 2015-03-06 14:27:15.059927968 +0530\n Birth: -\n# md5sum b2/file4\nb6273b589df2dfdbd8fe35b1011e3183  b2/file4\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#iv-select-one-brick-of-the-replica-as-the-source-for-all-files","title":"iv) Select one brick of the replica as the source for all files","text":"
                  gluster volume heal <VOLNAME> split-brain source-brick <HOSTNAME:BRICKNAME>\n

Consider a scenario where many files are in split-brain such that one brick of the replica pair is the source. As a result of the above command, all split-brained files in <HOSTNAME:BRICKNAME> are selected as the source and healed to the sink.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_4","title":"Example:","text":"

                  Consider a volume having three entries \"a, b and c\" in split-brain.

                  # gluster volume heal test split-brain source-brick test-host:/test/b1\nHealed gfid:944b4764-c253-4f02-b35f-0d0ae2f86c0f.\nHealed gfid:3256d814-961c-4e6e-8df2-3a3143269ced.\nHealed gfid:b23dd8de-af03-4006-a803-96d8bc0df004.\nNumber of healed entries: 3\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#32-resolution-of-gfid-split-brain-using-gluster-cli","title":"3.2 Resolution of GFID split-brain using gluster CLI","text":"

                  GFID split-brains can also be resolved by the gluster command line using the same policies that are used to resolve data and metadata split-brains.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#i-selecting-the-bigger-file-as-source","title":"i) Selecting the bigger-file as source","text":"

This method is useful for per-file healing where you can decide that the file with the bigger size is to be considered the source.

                  Run the following command to obtain the path of the file that is in split-brain:

                  # gluster volume heal VOLNAME info split-brain\n

                  From the output, identify the files for which file operations performed from the client failed with input/output error.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_5","title":"Example :","text":"
                  # gluster volume heal testvol info\nBrick 10.70.47.45:/bricks/brick2/b0\n/f5\n/ - Is in split-brain\n\nStatus: Connected\nNumber of entries: 2\n\nBrick 10.70.47.144:/bricks/brick2/b1\n/f5\n/ - Is in split-brain\n\nStatus: Connected\nNumber of entries: 2\n

Note: Entries which are in GFID split-brain may not always be shown as being in split-brain by the heal info or heal info split-brain commands. For entry split-brains, it is the parent directory which is shown as being in split-brain. So one might need to run info split-brain to get the directory names, and then heal info to get the list of files under that directory which might be in split-brain (a file could also just need healing without being in split-brain).

In the above command, testvol is the volume name, and b0 and b1 are the bricks. Execute the below getfattr command on the brick to find out whether a file is in GFID split-brain or not.

                  # getfattr -d -e hex -m. <path-to-file>\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_6","title":"Example :","text":"

                  On brick /b0

                  # getfattr -d -m . -e hex /bricks/brick2/b0/f5\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b0/f5\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.afr.testvol-client-1=0x000000020000000100000000\ntrusted.afr.dirty=0x000000000000000000000000\ntrusted.gfid=0xce0a9956928e40afb78e95f78defd64f\ntrusted.gfid2path.9cde09916eabc845=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6635\n

                  On brick /b1

                  # getfattr -d -m . -e hex /bricks/brick2/b1/f5\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b1/f5\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.afr.testvol-client-0=0x000000020000000100000000\ntrusted.afr.dirty=0x000000000000000000000000\ntrusted.gfid=0x9563544118653550e888ab38c232e0c\ntrusted.gfid2path.9cde09916eabc845=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6635\n

You can notice the difference in the GFID for the file f5 on the two bricks. You can find the difference in the file size by executing the stat command on the file from the bricks.

                  On brick /b0

                  # stat /bricks/brick2/b0/f5\nFile: \u2018/bricks/brick2/b0/f5\u2019\nSize: 15            Blocks: 8          IO Block: 4096   regular file\nDevice: fd15h/64789d    Inode: 67113350    Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nContext: system_u:object_r:glusterd_brick_t:s0\nAccess: 2018-08-29 20:46:26.353751073 +0530\nModify: 2018-08-29 20:46:26.361751203 +0530\nChange: 2018-08-29 20:47:16.363751236 +0530\nBirth: -\n

                  On brick /b1

                  # stat /bricks/brick2/b1/f5\nFile: \u2018/bricks/brick2/b1/f5\u2019\nSize: 2             Blocks: 8          IO Block: 4096   regular file\nDevice: fd15h/64789d    Inode: 67111750    Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nContext: system_u:object_r:glusterd_brick_t:s0\nAccess: 2018-08-29 20:44:56.153301616 +0530\nModify: 2018-08-29 20:44:56.161301745 +0530\nChange: 2018-08-29 20:44:56.162301761 +0530\nBirth: -\n

                  Execute the following command along with the full filename as seen from the root of the volume which is displayed in the heal info command's output:

                  # gluster volume heal VOLNAME split-brain bigger-file FILE\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_7","title":"Example :","text":"
                  # gluster volume heal testvol split-brain bigger-file /f5\nGFID split-brain resolved for file /f5\n

                  After the healing is complete, the GFID of the file on both the bricks must be the same as that of the file which had the bigger size. The following is a sample output of the getfattr command after completion of healing the file.

                  On brick /b0

                  # getfattr -d -m . -e hex /bricks/brick2/b0/f5\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b0/f5\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.gfid=0xce0a9956928e40afb78e95f78defd64f\ntrusted.gfid2path.9cde09916eabc845=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6635\n

                  On brick /b1

                  # getfattr -d -m . -e hex /bricks/brick2/b1/f5\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b1/f5\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.gfid=0xce0a9956928e40afb78e95f78defd64f\ntrusted.gfid2path.9cde09916eabc845=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6635\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#ii-selecting-the-file-with-latest-mtime-as-source","title":"ii) Selecting the file with latest mtime as source","text":"

                  This method is useful for per-file healing, when the file with the latest mtime has to be considered as the source.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_8","title":"Example :","text":"

                  Let's take another file which is in GFID split-brain and heal it using the latest-mtime option.

                  On brick /b0

                  # getfattr -d -m . -e hex /bricks/brick2/b0/f4\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b0/f4\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.afr.testvol-client-1=0x000000020000000100000000\ntrusted.afr.dirty=0x000000000000000000000000\ntrusted.gfid=0xb66b66d07b315f3c9cffac2fb6422a28\ntrusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634\n

                  On brick /b1

                  # getfattr -d -m . -e hex /bricks/brick2/b1/f4\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b1/f4\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.afr.testvol-client-0=0x000000020000000100000000\ntrusted.afr.dirty=0x000000000000000000000000\ntrusted.gfid=0x87242f808c6e56a007ef7d49d197acff\ntrusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634\n

                  Notice that the GFID of the file f4 differs on the two bricks. You can find the difference in modification time by executing the stat command on the file on each brick.

                  On brick /b0

                  # stat /bricks/brick2/b0/f4\nFile: \u2018/bricks/brick2/b0/f4\u2019\nSize: 14            Blocks: 8          IO Block: 4096   regular file\nDevice: fd15h/64789d    Inode: 67113349    Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nContext: system_u:object_r:glusterd_brick_t:s0\nAccess: 2018-08-29 20:57:38.913629991 +0530\nModify: 2018-08-29 20:57:38.921630122 +0530\nChange: 2018-08-29 20:57:38.923630154 +0530\nBirth: -\n

                  On brick /b1

                  # stat /bricks/brick2/b1/f4\nFile: \u2018/bricks/brick2/b1/f4\u2019\nSize: 2             Blocks: 8          IO Block: 4096   regular file\nDevice: fd15h/64789d    Inode: 67111749    Links: 2\nAccess: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)\nContext: system_u:object_r:glusterd_brick_t:s0\nAccess: 2018-08-24 20:54:50.953217256 +0530\nModify: 2018-08-24 20:54:50.961217385 +0530\nChange: 2018-08-24 20:54:50.962217402 +0530\nBirth: -\n

                  Execute the following command:

                  # gluster volume heal VOLNAME split-brain latest-mtime FILE\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_9","title":"Example :","text":"
                  # gluster volume heal testvol split-brain latest-mtime /f4\nGFID split-brain resolved for file /f4\n

                  After the healing is complete, the GFID of the file on both bricks must be the same. The following is a sample output of the getfattr command after healing; notice that the file has been healed using the brick having the latest mtime as the source.

                  On brick /b0

                  # getfattr -d -m . -e hex /bricks/brick2/b0/f4\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b0/f4\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.gfid=0xb66b66d07b315f3c9cffac2fb6422a28\ntrusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634\n

                  On brick /b1

                  # getfattr -d -m . -e hex /bricks/brick2/b1/f4\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b1/f4\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.gfid=0xb66b66d07b315f3c9cffac2fb6422a28\ntrusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#iii-select-one-of-the-bricks-in-the-replica-as-source-for-a-particular-file","title":"iii) Select one of the bricks in the replica as source for a particular file","text":"

                  This method is useful for per-file healing, when you know which copy of the file is good.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_10","title":"Example :","text":"

                  Let's take another file which is in GFID split-brain and heal it using the source-brick option.

                  On brick /b0

                  # getfattr -d -m . -e hex /bricks/brick2/b0/f3\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b0/f3\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.afr.testvol-client-1=0x000000020000000100000000\ntrusted.afr.dirty=0x000000000000000000000000\ntrusted.gfid=0x9d542fb1b3b15837a2f7f9dcdf5d6ee8\ntrusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634\n

                  On brick /b1

                  # getfattr -d -m . -e hex /bricks/brick2/b1/f3\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b1/f3\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.afr.testvol-client-0=0x000000020000000100000000\ntrusted.afr.dirty=0x000000000000000000000000\ntrusted.gfid=0xc90d9b0f65f6530b95b9f3f8334033df\ntrusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634\n

                  Notice that the GFID of the file f3 differs on the two bricks.

                  Execute the following command:

                  # gluster volume heal VOLNAME split-brain source-brick HOSTNAME:export-directory-absolute-path FILE\n

                  In this command, the copy of FILE present on HOSTNAME:export-directory-absolute-path is taken as the source for healing.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_11","title":"Example :","text":"
                  # gluster volume heal testvol split-brain source-brick 10.70.47.144:/bricks/brick2/b1 /f3\nGFID split-brain resolved for file /f3\n

                  After the healing is complete, the GFID of the file on both bricks should be the same as that on the brick which was chosen as the source for healing. The following is a sample output of the getfattr command after the file is healed.

                  On brick /b0

                  # getfattr -d -m . -e hex /bricks/brick2/b0/f3\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b0/f3\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.gfid=0xc90d9b0f65f6530b95b9f3f8334033df\ntrusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634\n

                  On brick /b1

                  # getfattr -d -m . -e hex /bricks/brick2/b1/f3\ngetfattr: Removing leading '/' from absolute path names\nfile: bricks/brick2/b1/f3\nsecurity.selinux=0x73797374656d5f753a6f626a6563745f723a676c7573746572645f627269636b5f743a733000\ntrusted.gfid=0xc90d9b0f65f6530b95b9f3f8334033df\ntrusted.gfid2path.364f55367c7bd6f4=0x30303030303030302d303030302d303030302d303030302d3030303030303030303030312f6634\n

                  Note

                  • One cannot use the GFID of the file as an argument with any of the CLI options to resolve GFID split-brain; it should be the absolute path of the file, as seen from the mount point.

                  • With the source-brick option, there is no way to resolve all the GFID split-brains in one shot by not specifying any file path in the CLI, as can be done while resolving data or metadata split-brain. For each file in GFID split-brain, the CLI has to be run with the policy you want to use.

                  • Resolving directory GFID split-brain using the CLI with the \"source-brick\" option in a \"distributed-replicated\" volume needs to be done explicitly on every sub-volume that is in this state. Since directories get created on all the sub-volumes, using one particular brick as the source heals the directory only for that particular sub-volume. The source bricks should be chosen such that, after the heal, all the bricks of all the sub-volumes have the same GFID, as sketched below.
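
                  For instance, a minimal sketch of healing a directory /dir that is in GFID split-brain on a hypothetical 2 x 2 distributed-replicated volume named dvol, whose replica pairs are (host:/bricks/b0, host:/bricks/b1) and (host:/bricks/b2, host:/bricks/b3); the volume, host and brick names here are illustrative only:

                  # gluster volume heal dvol split-brain source-brick host:/bricks/b0 /dir\n# gluster volume heal dvol split-brain source-brick host:/bricks/b2 /dir\n

                  The heal has to be run once per replica pair, and the chosen source bricks should already hold the same GFID for /dir so that, after the heal, all four bricks end up with an identical GFID.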

                  "},{"location":"Troubleshooting/resolving-splitbrain/#note","title":"Note:","text":"

                  As mentioned earlier, type mismatch cannot be resolved using the CLI. A type mismatch means the entry has different st_mode values (for example, the entry is a file on one brick while it is a directory on the other). Trying to heal such an entry will fail.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_12","title":"Example","text":"

                  The entry named \"entry1\" is of different types on the bricks of the replica. Let's try to heal it using the split-brain CLI.

                  # gluster volume heal test split-brain source-brick test-host:/test/b1 /entry1\nHealing /entry1 failed:Operation not permitted.\nVolume heal failed.\n

                  However, such entries can be fixed by deleting the file from all but one of the bricks. See Fixing Directory entry split-brain below.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#an-overview-of-working-of-heal-info-commands","title":"An overview of working of heal info commands","text":"

                  When these commands are invoked, a \"glfsheal\" process is spawned which reads the entries from the various sub-directories under /<brick-path>/.glusterfs/indices/ of all the bricks that are up (i.e. that it can connect to), one after another. These entries are the GFIDs of files that might need healing. Once the GFID entries from a brick are obtained, the lookup response for each file on every participating brick of the replica pair, along with the trusted.afr.* extended attributes, is used to determine whether the file needs healing, is in split-brain, etc., depending on what each command requires, and the result is displayed to the user.
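
                  To get a rough idea of what glfsheal scans, the index entries can be listed directly on a brick. A minimal sketch, assuming the brick path /bricks/brick2/b0 from the earlier examples (the entries are gfid-strings, not file names):

                  # ls /bricks/brick2/b0/.glusterfs/indices/xattrop | head\n# ls /bricks/brick2/b0/.glusterfs/indices/xattrop | wc -l    # approximate count of entries that may need heal\n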

                  "},{"location":"Troubleshooting/resolving-splitbrain/#4-resolution-of-split-brain-from-the-mount-point","title":"4. Resolution of split-brain from the mount point","text":"

                  A set of getfattr and setfattr commands have been provided to detect the data and metadata split-brain status of a file and to resolve the split-brain, if any, from the mount point.

                  Consider a volume \"test\", having bricks b0, b1, b2 and b3.

                  # gluster volume info test\n\nVolume Name: test\nType: Distributed-Replicate\nVolume ID: 00161935-de9e-4b80-a643-b36693183b61\nStatus: Started\nNumber of Bricks: 2 x 2 = 4\nTransport-type: tcp\nBricks:\nBrick1: test-host:/test/b0\nBrick2: test-host:/test/b1\nBrick3: test-host:/test/b2\nBrick4: test-host:/test/b3\n

                  Directory structure of the bricks is as follows:

                  # tree -R /test/b?\n/test/b0\n\u251c\u2500\u2500 dir\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 a\n\u2514\u2500\u2500 file100\n\n/test/b1\n\u251c\u2500\u2500 dir\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 a\n\u2514\u2500\u2500 file100\n\n/test/b2\n\u251c\u2500\u2500 dir\n\u251c\u2500\u2500 file1\n\u251c\u2500\u2500 file2\n\u2514\u2500\u2500 file99\n\n/test/b3\n\u251c\u2500\u2500 dir\n\u251c\u2500\u2500 file1\n\u251c\u2500\u2500 file2\n\u2514\u2500\u2500 file99\n

                  Some files in the volume are in split-brain.

                  # gluster v heal test info split-brain\nBrick test-host:/test/b0/\n/file100\n/dir\nNumber of entries in split-brain: 2\n\nBrick test-host:/test/b1/\n/file100\n/dir\nNumber of entries in split-brain: 2\n\nBrick test-host:/test/b2/\n/file99\n<gfid:5399a8d1-aee9-4653-bb7f-606df02b3696>\nNumber of entries in split-brain: 2\n\nBrick test-host:/test/b3/\n<gfid:05c4b283-af58-48ed-999e-4d706c7b97d5>\n<gfid:5399a8d1-aee9-4653-bb7f-606df02b3696>\nNumber of entries in split-brain: 2\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#to-know-datametadata-split-brain-status-of-a-file","title":"To know data/metadata split-brain status of a file:","text":"
                  getfattr -n replica.split-brain-status <path-to-file>\n

                  The above command, executed from the mount, tells you whether a file is in data/metadata split-brain. It also provides the list of afr children to analyze to get more information about the file. This command is not applicable to gfid/directory split-brain.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_13","title":"Example:","text":"
                  1. \"file100\" is in metadata split-brain. Executing the above mentioned command for file100 gives :
                  # getfattr -n replica.split-brain-status file100\nfile: file100\nreplica.split-brain-status=\"data-split-brain:no    metadata-split-brain:yes    Choices:test-client-0,test-client-1\"\n
                  1. \"file1\" is in data split-brain.
                  # getfattr -n replica.split-brain-status file1\nfile: file1\nreplica.split-brain-status=\"data-split-brain:yes    metadata-split-brain:no    Choices:test-client-2,test-client-3\"\n
                  1. \"file99\" is in both data and metadata split-brain.
                  # getfattr -n replica.split-brain-status file99\nfile: file99\nreplica.split-brain-status=\"data-split-brain:yes    metadata-split-brain:yes    Choices:test-client-2,test-client-3\"\n
                  1. \"dir\" is in directory split-brain but as mentioned earlier, the above command is not applicable to such split-brain. So it says that the file is not under data or metadata split-brain.
                  # getfattr -n replica.split-brain-status dir\nfile: dir\nreplica.split-brain-status=\"The file is not under data or metadata split-brain\"\n
                  1. \"file2\" is not in any kind of split-brain.
                  # getfattr -n replica.split-brain-status file2\nfile: file2\nreplica.split-brain-status=\"The file is not under data or metadata split-brain\"\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#to-analyze-the-files-in-data-and-metadata-split-brain","title":"To analyze the files in data and metadata split-brain","text":"

                  Trying to do operations (say cat, getfattr, etc.) from the mount on files in split-brain gives an input/output error. To enable users to analyze such files, a setfattr command is provided.

                  # setfattr -n replica.split-brain-choice -v \"choiceX\" <path-to-file>\n

                  Using this command, a particular brick can be chosen as the one from which to access the file in split-brain.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_14","title":"Example:","text":"
                  1. \"file1\" is in data-split-brain. Trying to read from the file gives input/output error.
                  # cat file1\ncat: file1: Input/output error\n

                  Split-brain choices provided for file1 were test-client-2 and test-client-3.

                  Setting test-client-2 as split-brain choice for file1 serves reads from b2 for the file.

                  # setfattr -n replica.split-brain-choice -v test-client-2 file1\n

                  Now, read operations on the file can be done.

                  # cat file1\nxyz\n

                  Similarly, to inspect the file from the other choice, replica.split-brain-choice should be set to test-client-3.

                  Trying to inspect the file from a wrong choice errors out.

                  To undo the split-brain-choice that has been set, the above-mentioned setfattr command can be used with \"none\" as the value for the extended attribute.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_15","title":"Example:","text":"
                  # setfattr -n replica.split-brain-choice -v none file1\n

                  Now performing a cat operation on the file will again result in an input/output error, as before.

                  # cat file1\ncat: file1: Input/output error\n

                  Once the choice for resolving the split-brain is made, the source brick must be set for the healing to be done. This is done using the following command:

                  # setfattr -n replica.split-brain-heal-finalize -v <heal-choice> <path-to-file>\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#example_16","title":"Example","text":"
                  # setfattr -n replica.split-brain-heal-finalize -v test-client-2 file1\n

                  The above process can be used to resolve data and/or metadata split-brain on all the files.

                  NOTE:

                  1. If \"fopen-keep-cache\" fuse mount option is disabled then inode needs to be invalidated each time before selecting a new replica.split-brain-choice to inspect a file. This can be done by using:
                  # setfattr -n inode-invalidate -v 0 <path-to-file>\n
                  2. The above-mentioned process for split-brain resolution from the mount will not work on NFS mounts, as they do not provide xattr support.
                  "},{"location":"Troubleshooting/resolving-splitbrain/#5-automagic-unsplit-brain-by-ctimemtimesizemajority","title":"5. Automagic unsplit-brain by [ctime|mtime|size|majority]","text":"

                  The CLI- and fuse-mount-based resolution methods require intervention in the sense that the admin/user needs to run the commands manually. There is a cluster.favorite-child-policy volume option which, when set to one of the available policies, automatically resolves split-brains without user intervention. The default value is 'none', i.e. the feature is disabled.

                  # gluster volume set help | grep -A3 cluster.favorite-child-policy\nOption: cluster.favorite-child-policy\nDefault Value: none\nDescription: This option can be used to automatically resolve split-brains using various policies without user intervention. \"size\" picks the file with the biggest size as the source. \"ctime\" and \"mtime\" pick the file with the latest ctime and mtime respectively as the source. \"majority\" picks a file with identical mtime and size in more than half the number of bricks in the replica.\n

                  cluster.favorite-child-policy applies to all the files of the volume. It is assumed that if this option is enabled with a particular policy, you do not want to examine split-brain files on a per-file basis but just want split-brains to be resolved as and when they occur, based on the chosen policy.
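
                  For instance, a minimal sketch of enabling the mtime policy on the testvol volume used in the earlier examples, and of disabling it again later:

                  # gluster volume set testvol cluster.favorite-child-policy mtime\n# gluster volume set testvol cluster.favorite-child-policy none\n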

                  "},{"location":"Troubleshooting/resolving-splitbrain/#manual-split-brain-resolution","title":"Manual Split-Brain Resolution:","text":""},{"location":"Troubleshooting/resolving-splitbrain/#quick-start","title":"Quick Start:","text":"
                  1. Get the path of the file that is in split-brain:

                  It can be obtained either by a) running the command gluster volume heal <VOLNAME> info split-brain, or b) identifying the files for which file operations performed from the client keep failing with Input/Output error.

                  2. Close the applications that opened this file from the mount point. In case of VMs, they need to be powered off.

                  3. Decide on the correct copy:

                    This is done by observing the afr changelog extended attributes of the file on the bricks using the getfattr command; then identifying the type of split-brain (data split-brain, metadata split-brain, entry split-brain or split-brain due to gfid-mismatch); and finally determining which of the bricks contains the 'good copy' of the file. getfattr -d -m . -e hex <file-path-on-brick>. It is also possible that one brick might contain the correct data while the other might contain the correct metadata.

                  4. Reset the relevant extended attribute on the brick(s) that contains the 'bad copy' of the file data/metadata using the setfattr command.

                    setfattr -n <attribute-name> -v <attribute-value> <file-path-on-brick>

                  5. Trigger self-heal on the file by performing lookup from the client:

                    ls -l <file-path-on-gluster-mount>

                  "},{"location":"Troubleshooting/resolving-splitbrain/#detailed-instructions-for-steps-3-through-5","title":"Detailed Instructions for steps 3 through 5:","text":"

                  To understand how to resolve split-brain we need to know how to interpret the afr changelog extended attributes.

                  Execute getfattr -d -m . -e hex <file-path-on-brick>

                  Example:

                  [root@store3 ~]# getfattr -d -e hex -m. brick-a/file.txt\n\\#file: brick-a/file.txt\nsecurity.selinux=0x726f6f743a6f626a6563745f723a66696c655f743a733000\ntrusted.afr.vol-client-2=0x000000000000000000000000\ntrusted.afr.vol-client-3=0x000000000200000000000000\ntrusted.gfid=0x307a5c9efddd4e7c96e94fd4bcdcbd1b\n

                  The extended attributes with trusted.afr.<volname>-client-<subvolume-index> are used by afr to maintain the changelog of the file. The values of trusted.afr.<volname>-client-<subvolume-index> are calculated by the glusterfs client (fuse or nfs-server) processes. When the glusterfs client modifies a file or directory, the client contacts each brick and updates the changelog extended attribute according to the response from the brick.

                  'subvolume-index' is nothing but (brick number - 1) in gluster volume info <volname> output.

                  Example:

                  [root@pranithk-laptop ~]# gluster volume info vol\n Volume Name: vol\n Type: Distributed-Replicate\n Volume ID: 4f2d7849-fbd6-40a2-b346-d13420978a01\n Status: Created\n Number of Bricks: 4 x 2 = 8\n Transport-type: tcp\n Bricks:\n brick-a: pranithk-laptop:/gfs/brick-a\n brick-b: pranithk-laptop:/gfs/brick-b\n brick-c: pranithk-laptop:/gfs/brick-c\n brick-d: pranithk-laptop:/gfs/brick-d\n brick-e: pranithk-laptop:/gfs/brick-e\n brick-f: pranithk-laptop:/gfs/brick-f\n brick-g: pranithk-laptop:/gfs/brick-g\n brick-h: pranithk-laptop:/gfs/brick-h\n

                  In the example above:

                  Brick             |    Replica set        |    Brick subvolume index\n----------------------------------------------------------------------------\n-/gfs/brick-a     |       0               |       0\n-/gfs/brick-b     |       0               |       1\n-/gfs/brick-c     |       1               |       2\n-/gfs/brick-d     |       1               |       3\n-/gfs/brick-e     |       2               |       4\n-/gfs/brick-f     |       2               |       5\n-/gfs/brick-g     |       3               |       6\n-/gfs/brick-h     |       3               |       7\n

                  Each file in a brick maintains the changelog of itself and that of the files present in all the other bricks in its replica set as seen by that brick.

                  In the example volume given above, all files in brick-a will have 2 entries, one for itself and the other for the file present in its replica pair, i.e. brick-b: trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for itself (brick-a) trusted.afr.vol-client-1=0x000000000000000000000000 -->changelog for brick-b as seen by brick-a

                  Likewise, all files in brick-b will have: trusted.afr.vol-client-0=0x000000000000000000000000 -->changelog for brick-a as seen by brick-b trusted.afr.vol-client-1=0x000000000000000000000000 -->changelog for itself (brick-b)

                  The same can be extended for other replica pairs.

                  Interpreting the changelog (roughly, the pending operation count) value: Each extended attribute has a value which is 24 hexadecimal digits. The first 8 digits represent the changelog of data, the second 8 digits the changelog of metadata, and the last 8 digits the changelog of directory entries.

                  Pictorially representing the same, we have:

                  0x 000003d7 00000001 00000000\n        |      |       |\n        |      |        \\_ changelog of directory entries\n        |       \\_ changelog of metadata\n         \\ _ changelog of data\n
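
                  If needed, the three fields can be split out of the hex value directly on a brick. A minimal bash sketch, assuming the brick path and attribute name from the example above:

                  # val=$(getfattr -n trusted.afr.vol-client-1 -e hex /gfs/brick-a/a 2>/dev/null | awk -F= '/trusted.afr/{print $2}')\n# val=${val#0x}                                             # strip the 0x prefix\n# echo \"data=${val:0:8} metadata=${val:8:8} entry=${val:16:8}\"\n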

                  For directories, the metadata and entry changelogs are valid. For regular files, the data and metadata changelogs are valid. For special files like device files, the metadata changelog is valid. When a file split-brain happens, it could be a data split-brain, a metadata split-brain, or both. When a split-brain happens, the changelog of the file would look something like this:

                  Example: (Let's consider both data and metadata split-brain on the same file.)

                  [root@pranithk-laptop vol]# getfattr -d -m . -e hex /gfs/brick-?/a\ngetfattr: Removing leading '/' from absolute path names\n\\#file: gfs/brick-a/a\ntrusted.afr.vol-client-0=0x000000000000000000000000\ntrusted.afr.vol-client-1=0x000003d70000000100000000\ntrusted.gfid=0x80acdbd886524f6fbefa21fc356fed57\n\\#file: gfs/brick-b/a\ntrusted.afr.vol-client-0=0x000003b00000000100000000\ntrusted.afr.vol-client-1=0x000000000000000000000000\ntrusted.gfid=0x80acdbd886524f6fbefa21fc356fed57\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#observations","title":"Observations:","text":""},{"location":"Troubleshooting/resolving-splitbrain/#according-to-changelog-extended-attributes-on-file-gfsbrick-aa","title":"According to changelog extended attributes on file /gfs/brick-a/a:","text":"

                  The first 8 digits of trusted.afr.vol-client-0 are all zeros (0x00000000................), and the first 8 digits of trusted.afr.vol-client-1 are not all zeros (0x000003d7................). So the changelog on /gfs/brick-a/a implies that some data operations succeeded on itself but failed on /gfs/brick-b/a.

                  The second 8 digits of trusted.afr.vol-client-0 are all zeros (0x........00000000........), and the second 8 digits of trusted.afr.vol-client-1 are not all zeros (0x........00000001........). So the changelog on /gfs/brick-a/a implies that some metadata operations succeeded on itself but failed on /gfs/brick-b/a.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#according-to-changelog-extended-attributes-on-file-gfsbrick-ba","title":"According to Changelog extended attributes on file /gfs/brick-b/a:","text":"

                  The first 8 digits of trusted.afr.vol-client-0 are not all zeros (0x000003b0................), and the first 8 digits of trusted.afr.vol-client-1 are all zeros (0x00000000................). So the changelog on /gfs/brick-b/a implies that some data operations succeeded on itself but failed on /gfs/brick-a/a.

                  The second 8 digits of trusted.afr.vol-client-0 are not all zeros (0x........00000001........), and the second 8 digits of trusted.afr.vol-client-1 are all zeros (0x........00000000........). So the changelog on /gfs/brick-b/a implies that some metadata operations succeeded on itself but failed on /gfs/brick-a/a.

                  Since both copies have data and metadata changes that are not present on the other, the file is in both data and metadata split-brain.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#deciding-on-the-correct-copy","title":"Deciding on the correct copy:","text":"

                  The user may have to inspect the stat and getfattr output of the files to decide which metadata to retain, and the contents of the file to decide which data to retain. Continuing with the example above, let's say we want to retain the data of /gfs/brick-a/a and the metadata of /gfs/brick-b/a.

                  "},{"location":"Troubleshooting/resolving-splitbrain/#resetting-the-relevant-changelogs-to-resolve-the-split-brain","title":"Resetting the relevant changelogs to resolve the split-brain:","text":"

                  For resolving data-split-brain:

                  We need to change the changelog extended attributes on the files as if some data operations succeeded on /gfs/brick-a/a but failed on /gfs/brick-b/a. But /gfs/brick-b/a should NOT have any changelog which says some data operations succeeded on /gfs/brick-b/a but failed on /gfs/brick-a/a. We need to reset the data part of the changelog on trusted.afr.vol-client-0 of /gfs/brick-b/a.

                  For resolving metadata-split-brain:

                  We need to change the changelog extended attributes on the files as if some metadata operations succeeded on /gfs/brick-b/a but failed on /gfs/brick-a/a. But /gfs/brick-a/a should NOT have any changelog which says some metadata operations succeeded on /gfs/brick-a/a but failed on /gfs/brick-b/a. We need to reset the metadata part of the changelog on trusted.afr.vol-client-1 of /gfs/brick-a/a.

                  So, the intended changes are: On /gfs/brick-b/a: change trusted.afr.vol-client-0 from 0x000003b00000000100000000 to 0x000000000000000100000000 (note that the metadata part is still not all zeros). Hence execute setfattr -n trusted.afr.vol-client-0 -v 0x000000000000000100000000 /gfs/brick-b/a

                  On /gfs/brick-a/a: change trusted.afr.vol-client-1 from 0x000003d70000000100000000 to 0x000003d70000000000000000 (note that the data part is still not all zeros). Hence execute setfattr -n trusted.afr.vol-client-1 -v 0x000003d70000000000000000 /gfs/brick-a/a

                  Thus after the above operations are done, the changelogs look like this:

                  [root@pranithk-laptop vol]# getfattr -d -m . -e hex /gfs/brick-?/a\ngetfattr: Removing leading '/' from absolute path names\n\\#file: gfs/brick-a/a\ntrusted.afr.vol-client-0=0x000000000000000000000000\ntrusted.afr.vol-client-1=0x000003d70000000000000000\ntrusted.gfid=0x80acdbd886524f6fbefa21fc356fed57\n\n\\#file: gfs/brick-b/a\ntrusted.afr.vol-client-0=0x000000000000000100000000\ntrusted.afr.vol-client-1=0x000000000000000000000000\ntrusted.gfid=0x80acdbd886524f6fbefa21fc356fed57\n
                  "},{"location":"Troubleshooting/resolving-splitbrain/#triggering-self-heal","title":"Triggering Self-heal:","text":"

                  Perform ls -l <file-path-on-gluster-mount> to trigger healing.

                  Fixing Directory entry split-brain:

                  Afr has the ability to conservatively merge different entries in the directories when there is a split-brain on a directory. If directory 'd' has entries '1', '2' on one brick and entries '3', '4' on the other brick, then afr will merge all of the entries so that the directory has entries '1', '2', '3', '4' on both bricks. (Note: this may result in deleted files re-appearing if the split-brain happened because of deletion of files from the directory.) Split-brain resolution needs human intervention when there is at least one entry which has the same file name but a different gfid in that directory. Example: on brick-a the directory has entries '1' (with gfid g1) and '2', and on brick-b the directory has entries '1' (with gfid g2) and '3'. These kinds of directory split-brains need human intervention to resolve. The user needs to remove either file '1' on brick-a or file '1' on brick-b to resolve the split-brain. In addition, the corresponding gfid-link file also needs to be removed. The gfid-link files are present in the .glusterfs folder in the top-level directory of the brick. If the gfid of the file is 0x307a5c9efddd4e7c96e94fd4bcdcbd1b (the trusted.gfid extended attribute obtained from the getfattr command earlier), the gfid-link file can be found at

                  /gfs/brick-a/.glusterfs/30/7a/307a5c9efddd4e7c96e94fd4bcdcbd1b

                  "},{"location":"Troubleshooting/resolving-splitbrain/#word-of-caution","title":"Word of caution:","text":"

                  Before deleting the gfid-link, we have to ensure that there are no hard links to the file present on that brick. If hard links exist, they must be deleted as well.
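
                  A minimal sketch of the checks and removals described above, using the same placeholder style as the rest of this document (the gfid-link path below reuses the gfid from the earlier example):

                  # stat -c %h <file-path-on-brick>                    # hard-link count; 2 is normal on a brick (file name + gfid-link), more means extra hard links\n# find <brick-path> -samefile <file-path-on-brick>   # lists every path sharing the inode, including the gfid-link\n# rm <file-path-on-brick>                            # remove the bad copy (and any extra hard links found above)\n# rm <brick-path>/.glusterfs/30/7a/307a5c9efddd4e7c96e94fd4bcdcbd1b    # remove the corresponding gfid-link\n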

                  "},{"location":"Troubleshooting/statedump/","title":"Statedump","text":"

                  A statedump is, as the name suggests, a dump of the internal state of a glusterfs process. It captures information about in-memory structures such as frames, call stacks, active inodes, fds, mempools, iobufs, and locks as well as xlator specific data structures. This can be an invaluable tool for debugging memory leaks and hung processes.

                  • Generate a Statedump
                  • Read a Statedump
                  • Debug with a Statedump
                  "},{"location":"Troubleshooting/statedump/#generate-a-statedump","title":"Generate a Statedump","text":"

                  Run the command

                  gluster --print-statedumpdir\n

                  on a gluster server node to find out which directory the statedumps will be created in. This directory may need to be created if not already present. For the rest of this document, we will refer to this directory as statedump-directory.
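
                  For example, a minimal sketch of making sure the directory exists before triggering a dump:

                  # mkdir -p \"$(gluster --print-statedumpdir)\"\n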

                  To generate a statedump for a process, run

                  kill -USR1 <pid-of-gluster-process>\n

                  For client mounts:

                  Run the following command on the client system

                  kill -USR1 <pid-of-gluster-mount-process>\n
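
                  A minimal sketch of locating that PID first, assuming a hypothetical mount point /mnt/glustervol (the command line of a fuse client process normally contains its mount point):

                  # pgrep -af glusterfs | grep /mnt/glustervol          # list matching processes with their command lines\n# kill -USR1 $(pgrep -f 'glusterfs.*/mnt/glustervol')\n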

                  There are specific commands to generate statedumps for all brick processes, the NFS server, and quotad, which can be used instead of the above. Run the following commands on one of the server nodes:

                  For bricks:

                  gluster volume statedump <volname>\n

                  For the NFS server:

                  gluster volume statedump <volname> nfs\n

                  For quotad:

                  gluster volume statedump <volname> quotad\n

                  The statedumps will be created in statedump-directory on each node. The statedumps for brick processes will be created with the filename hyphenated-brick-path.<pid>.dump.timestamp while for all other processes it will be glusterdump.<pid>.dump.timestamp.

                  "},{"location":"Troubleshooting/statedump/#read-a-statedump","title":"Read a Statedump","text":"

                  Statedumps are text files and can be opened in any text editor. The first and last lines of the file contain the start and end time (in UTC) respectively of when the statedump file was written.

                  "},{"location":"Troubleshooting/statedump/#mallinfo","title":"Mallinfo","text":"

                  The mallinfo return status is printed in the following format. Please read man mallinfo for more information about what each field means.

                  [mallinfo]\nmallinfo_arena=100020224    /* Non-mmapped space allocated (bytes) */\nmallinfo_ordblks=69467      /* Number of free chunks */\nmallinfo_smblks=449         /* Number of free fastbin blocks */\nmallinfo_hblks=13           /* Number of mmapped regions */\nmallinfo_hblkhd=20144128    /* Space allocated in mmapped regions (bytes) */\nmallinfo_usmblks=0          /* Maximum total allocated space (bytes) */\nmallinfo_fsmblks=39264      /* Space in freed fastbin blocks (bytes) */\nmallinfo_uordblks=96710112  /* Total allocated space (bytes) */\nmallinfo_fordblks=3310112   /* Total free space (bytes) */\nmallinfo_keepcost=133712    /* Top-most, releasable space (bytes) */\n
                  "},{"location":"Troubleshooting/statedump/#memory-accounting-stats","title":"Memory accounting stats","text":"

                  Each xlator defines data structures specific to its requirements. The statedump captures information about the memory usage and allocations of these structures for each xlator in the call-stack and prints them in the following format:

                  For the xlator with the name glusterfs

                  [global.glusterfs - Memory usage]   #[global.<xlator-name> - Memory usage]\nnum_types=119                       #The number of data types it is using\n

                  followed by the memory usage for each data-type for that translator. The following example displays a sample for the gf_common_mt_gf_timer_t type

                  [global.glusterfs - usage-type gf_common_mt_gf_timer_t memusage]\n#[global.<xlator-name> - usage-type <tag associated with the data-type> memusage]\nsize=112          #Total size allocated for data-type when the statedump was taken i.e. num_allocs * sizeof (data-type)\nnum_allocs=2      #Number of allocations of the data-type which are active at the time of taking the statedump.\nmax_size=168      #max_num_allocs times the sizeof(data-type) i.e. max_num_allocs * sizeof (data-type)\nmax_num_allocs=3  #Maximum number of active allocations at any point in the life of the process.\ntotal_allocs=7    #Number of times this data-type was allocated in the life of the process.\n

                  This information is useful while debugging high memory usage issues as steadily increasing values for num_allocs may indicate a memory leak for that data-type.
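
                  A minimal sketch of pulling the data types with the largest allocation counts out of a dump; the -B2 context assumes the section header sits two lines above num_allocs, as in the sample above:

                  # grep -B2 -E 'num_allocs=[0-9]{5,}' glusterdump.<pid>.dump.<timestamp> | grep -E 'usage-type|num_allocs'\n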

                  "},{"location":"Troubleshooting/statedump/#mempools","title":"Mempools","text":"

                  Mempools are an optimization intended to reduce the number of allocations of a data type. By creating a mempool of 1024 elements for a data-type, new elements of that type will be allocated from the heap using syscalls like calloc only if all the 1024 elements in the pool are in active use.

                  Memory pool allocations by each xlator are displayed in the following format:

                  [mempool] #Section name\n-----=-----\npool-name=fuse:fd_t #pool-name=<xlator-name>:<data-type>\nhot-count=1         #number of mempool elements in active use. i.e. for this pool it is the number of 'fd_t' elements in active use.\ncold-count=1023     #number of mempool elements that are not in use. New allocation requests will be served from here until all the elements in the pool are in use i.e. cold-count becomes 0.\npadded_sizeof=108   #Element size including padding. Each mempool element is padded with a doubly-linked-list + ptr of mempool + is-in-use info to operate the pool of elements\npool-misses=0       #Number of times the element was allocated from heap because all elements from the pool were in active use.\nalloc-count=314     #Number of times this type of data was allocated through out the life of this process. This may include pool-misses as well.\nmax-alloc=3         #Maximum number of elements from the pool in active use at any point in the life of the process. This does *not* include pool-misses.\ncur-stdalloc=0      #Number of allocations made from heap that are yet to be released via mem_put().\nmax-stdalloc=0      #Maximum number of allocations from heap that were in active use at any point in the life of the process.\n

                  This information is also useful while debugging high memory usage issues, as large hot-count and cur-stdalloc values may point to an element not being freed after it has been used.

                  "},{"location":"Troubleshooting/statedump/#iobufs","title":"Iobufs","text":"
                  [iobuf.global]\niobuf_pool=0x1f0d970                #The memory pool for iobufs\niobuf_pool.default_page_size=131072 #The default size of iobuf (if no iobuf size is specified the default size is allocated)\n#iobuf_arena: One arena represents a group of iobufs of a particular size\niobuf_pool.arena_size=12976128       # The initial size of the iobuf pool (doesn't include the stdalloc'd memory or newly added arenas)\niobuf_pool.arena_cnt=8               #Total number of arenas in the pool\niobuf_pool.request_misses=0          #The number of iobufs that were stdalloc'd (as they exceeded the default max page size provided by iobuf_pool).\n

                  There are 3 lists of arenas

                  1. Arena list: arenas allocated during iobuf pool creation and the arenas that are in use(active_cnt != 0) will be part of this list.
                  2. Purge list: arenas that can be purged(no active iobufs, active_cnt == 0).
                  3. Filled list: arenas without free iobufs.
                  [purge.1]                        #purge.<S.No.>\npurge.1.mem_base=0x7fc47b35f000  #The address of the arena structure\npurge.1.active_cnt=0             #The number of iobufs active in that arena\npurge.1.passive_cnt=1024         #The number of unused iobufs in the arena\npurge.1.alloc_cnt=22853          #Total allocs in this pool(number of times the iobuf was allocated from this arena)\npurge.1.max_active=7             #Max active iobufs from this arena, at any point in the life of this process.\npurge.1.page_size=128            #Size of all the iobufs in this arena.\n\n[arena.5] #arena.<S.No.>\narena.5.mem_base=0x7fc47af1f000\narena.5.active_cnt=0\narena.5.passive_cnt=64\narena.5.alloc_cnt=0\narena.5.max_active=0\narena.5.page_size=32768\n

                  If the active_cnt of any arena is non zero, then the statedump will also have the iobuf list.

                  [arena.6.active_iobuf.1]                  #arena.<S.No>.active_iobuf.<iobuf.S.No.>\narena.6.active_iobuf.1.ref=1              #refcount of the iobuf\narena.6.active_iobuf.1.ptr=0x7fdb921a9000 #address of the iobuf\n\n[arena.6.active_iobuf.2]\narena.6.active_iobuf.2.ref=1\narena.6.active_iobuf.2.ptr=0x7fdb92189000\n

                  A lot of filled arenas at any given point in time could be a sign of iobuf leaks.

                  "},{"location":"Troubleshooting/statedump/#call-stack","title":"Call stack","text":"

                  The fops received by gluster are handled using call stacks. A call stack contains information about the uid/gid/pid etc of the process that is executing the fop. Each call stack contains different call-frames for each xlator which handles that fop.

                  [global.callpool.stack.3]    #global.callpool.stack.<Serial-Number>\nstack=0x7fc47a44bbe0         #Stack address\nuid=0                        #Uid of the process executing the fop\ngid=0                        #Gid of the process executing the fop\npid=6223                     #Pid of the process executing the fop\nunique=2778                  #Some Xlators like afr do copy_frame and perform the operation in a different stack. This id is used to determine the stacks that are inter-related because of copy-frame\nlk-owner=0000000000000000    #Some of the fuse fops have lk-owner.\nop=LOOKUP                    #Fop\ntype=1                       #Type of the op i.e. FOP/MGMT-OP\ncnt=9                        #Number of frames in this stack.\n
                  "},{"location":"Troubleshooting/statedump/#call-frame","title":"Call-frame","text":"

                  Each frame will have information about which xlator the frame belongs to, which function it wound to/from and which it will be unwound to, and whether it has unwound.

                  [global.callpool.stack.3.frame.2] #global.callpool.stack.<stack-serial-number>.frame.<frame-serial-number>\nframe=0x7fc47a611dbc              #Frame address\nref_count=0                       #Incremented at the time of wind and decremented at the time of unwind.\ntranslator=r2-client-1            #Xlator this frame belongs to\ncomplete=0                        #1 if this frame is already unwound. 0 if it is yet to unwind.\nparent=r2-replicate-0             #Parent xlator of this frame\nwind_from=afr_lookup              #Parent xlator function from which it was wound\nwind_to=priv->children[i]->fops->lookup\nunwind_to=afr_lookup_cbk          #Parent xlator function to unwind to\n

                  To debug hangs in the system, see which xlator has not yet unwound its fop by checking the value of the complete tag in the statedump. (complete=0 indicates the xlator has not yet unwound).
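
                  A minimal sketch of listing the translators whose frames have not yet unwound, assuming the frame layout shown above (the translator line sits just above the complete line):

                  # grep -B1 'complete=0' glusterdump.<pid>.dump.<timestamp> | grep -E 'translator|complete'\n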

                  "},{"location":"Troubleshooting/statedump/#fuse-operation-history","title":"FUSE Operation History","text":"

                  Gluster Fuse maintains a history of the operations that it has performed.

                  [xlator.mount.fuse.history]\nTIME=2014-07-09 16:44:57.523364\nmessage=[0] fuse_release: RELEASE(): 4590:, fd: 0x1fef0d8, gfid: 3afb4968-5100-478d-91e9-76264e634c9f\n\nTIME=2014-07-09 16:44:57.523373\nmessage=[0] send_fuse_err: Sending Success for operation 18 on inode 3afb4968-5100-478d-91e9-76264e634c9f\n\nTIME=2014-07-09 16:44:57.523394\nmessage=[0] fuse_getattr_resume: 4591, STAT, path: (/iozone.tmp), gfid: (3afb4968-5100-478d-91e9-76264e634c9f)\n
                  "},{"location":"Troubleshooting/statedump/#xlator-configuration","title":"Xlator configuration","text":"
                  [cluster/replicate.r2-replicate-0] #Xlator type, name information\nchild_count=2                      #Number of children for the xlator\n#Xlator specific configuration below\nchild_up[0]=1\npending_key[0]=trusted.afr.r2-client-0\nchild_up[1]=1\npending_key[1]=trusted.afr.r2-client-1\ndata_self_heal=on\nmetadata_self_heal=1\nentry_self_heal=1\ndata_change_log=1\nmetadata_change_log=1\nentry-change_log=1\nread_child=1\nfavorite_child=-1\nwait_count=1\n
                  "},{"location":"Troubleshooting/statedump/#graphinode-table","title":"Graph/inode table","text":"
                  [active graph - 1]\n\nconn.1.bound_xl./data/brick01a/homegfs.hashsize=14057\nconn.1.bound_xl./data/brick01a/homegfs.name=/data/brick01a/homegfs/inode\nconn.1.bound_xl./data/brick01a/homegfs.lru_limit=16384 #Least recently used size limit\nconn.1.bound_xl./data/brick01a/homegfs.active_size=690 #Number of inodes undergoing some kind of fop ie., on which there is at least one ref.\nconn.1.bound_xl./data/brick01a/homegfs.lru_size=183    #Number of inodes present in lru list\nconn.1.bound_xl./data/brick01a/homegfs.purge_size=0    #Number of inodes present in purge list\n
                  "},{"location":"Troubleshooting/statedump/#inode","title":"Inode","text":"
                  [conn.1.bound_xl./data/brick01a/homegfs.active.324] #324th inode in active inode list\ngfid=e6d337cf-97eb-44b3-9492-379ba3f6ad42           #Gfid of the inode\nnlookup=13                                          #Number of times lookups happened from the client or from fuse kernel\nfd-count=4                                          #Number of fds opened on the inode\nref=11                                              #Number of refs taken on the inode\nia_type=1                                           #Type of the inode. This should be changed to some string :-(\n\n[conn.1.bound_xl./data/brick01a/homegfs.lru.1] #1st inode in lru list. Note that ref count is zero for these inodes.\ngfid=5114574e-69bc-412b-9e52-f13ff087c6fc\nnlookup=5\nfd-count=0\nref=0\nia_type=2\n
                  "},{"location":"Troubleshooting/statedump/#inode-context","title":"Inode context","text":"

                  Each xlator can store information specific to it in the inode context. This context can also be printed in the statedump. Here is the inode context of the locks xlator

                  [xlator.features.locks.homegfs-locks.inode]\npath=/homegfs/users/dfrobins/gfstest/r4/SCRATCH/fort.5102 - path of the file\nmandatory=0\ninodelk-count=5 #Number of inode locks\nlock-dump.domain.domain=homegfs-replicate-0:self-heal #Domain on which the lock was taken. In this case, this domain is used by the selfheal to prevent more than one heal on the same file\ninodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=0, len=0, pid = 18446744073709551615, owner=080b1ada117f0000, client=0xb7fc30, connection-id=compute-30-029.com-3505-2014/06/29-14:46:12:477358-homegfs-client-0-0-1, granted at Sun Jun 29 11:01:00 2014 #Active lock information\n\ninodelk.inodelk[1](BLOCKED)=type=WRITE, whence=0, start=0, len=0, pid = 18446744073709551615, owner=c0cb091a277f0000, client=0xad4f10, connection-id=gfs01a.com-4080-2014/06/29-14:41:36:917768-homegfs-client-0-0-0, blocked at Sun Jun 29 11:04:44 2014 #Blocked lock information\n\nlock-dump.domain.domain=homegfs-replicate-0:metadata #Domain name where metadata operations take locks to maintain replication consistency\nlock-dump.domain.domain=homegfs-replicate-0 #Domain name where entry/data operations take locks to maintain replication consistency\ninodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=11141120, len=131072, pid = 18446744073709551615, owner=080b1ada117f0000, client=0xb7fc30, connection-id=compute-30-029.com-3505-2014/06/29-14:46:12:477358-homegfs-client-0-0-1, granted at Sun Jun 29 11:10:36 2014 #Active lock information\n
                  "},{"location":"Troubleshooting/statedump/#debug-with-statedumps","title":"Debug With Statedumps","text":""},{"location":"Troubleshooting/statedump/#memory-leaks","title":"Memory leaks","text":"

                  Statedumps can be used to determine whether the high memory usage of a process is caused by a leak. To debug the issue, generate statedumps for that process at regular intervals, or before and after running the steps that cause the memory used to increase. Once you have multiple statedumps, compare the memory allocation stats to see if any of them are increasing steadily as those could indicate a potential memory leak.

                  The following examples walk through using statedumps to debug two different memory leaks.

                  "},{"location":"Troubleshooting/statedump/#with-the-memory-accounting-feature","title":"With the memory accounting feature:","text":"

                  BZ 1120151 reported high memory usage by the self heal daemon whenever one of the bricks was wiped in a replicate volume and a full self-heal was invoked to heal the contents. This issue was debugged using statedumps to determine which data-structure was leaking memory.

                  A statedump of the self heal daemon process was taken using

                  kill -USR1 `<pid-of-gluster-self-heal-daemon>`\n
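
                  One way to find that PID, assuming the self-heal daemon's command line contains \"glustershd\" (as it normally does):

                  # pgrep -af glustershd\n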

                  On examining the statedump:

                  grep -w num_allocs glusterdump.5225.dump.1405493251\nnum_allocs=77078\nnum_allocs=87070\nnum_allocs=117376\n....\n\ngrep hot-count glusterdump.5225.dump.1405493251\nhot-count=16384\nhot-count=16384\nhot-count=4095\n....\n

                  On searching for num_allocs with high values in the statedump, a grep of the statedump revealed a large number of allocations for the following data-types under the replicate xlator:

                  1. gf_common_mt_asprintf
                  2. gf_common_mt_char
                  3. gf_common_mt_mem_pool.

                  On checking the afr code for allocations with the tag gf_common_mt_char, it was found that the data-self-heal code path does not free one such allocated data structure. gf_common_mt_mem_pool suggests that there is a leak in pool memory. The replicate-0:dict_t, glusterfs:data_t and glusterfs:data_pair_t pools are using a lot of memory, i.e. cold_count is 0 and there are too many allocations. Checking the source code of dict.c shows that the key in a dict is allocated with the gf_common_mt_char tag (2 above), and the value is created using gf_asprintf, which in turn uses gf_common_mt_asprintf (1 above). Checking the code for leaks in the self-heal code paths led to a line which overwrites a variable with a new dictionary even when it was already holding a reference to another dictionary. After fixing these leaks, we ran the same test to verify that none of the num_allocs values increased in the statedump of the self-heal daemon after healing 10,000 files. Please check http://review.gluster.org/8316 for more information about the patch/code.

                  "},{"location":"Troubleshooting/statedump/#leaks-in-mempools","title":"Leaks in mempools:","text":"

                  The statedump output of mempools was used to test and verify the fixes for BZ 1134221. On code analysis, dict_t objects were found to be leaking (due to missing unref's) during name self-heal.

                  Glusterfs was compiled with the -DDEBUG flags to have the cold count set to 0 by default. The test involved creating 100 files on a plain replicate volume, removing them from one of the backend bricks, and then triggering lookups on them from the mount point. A statedump of the mount process was taken before executing the test case and after it was completed.

                  Statedump output of the fuse mount process before the test case was executed:

                  pool-name=glusterfs:dict_t\nhot-count=0\ncold-count=0\npadded_sizeof=140\nalloc-count=33\nmax-alloc=0\npool-misses=33\ncur-stdalloc=14\nmax-stdalloc=18\n

                  Statedump output of the fuse mount process after the test case was executed:

                  pool-name=glusterfs:dict_t\nhot-count=0\ncold-count=0\npadded_sizeof=140\nalloc-count=2841\nmax-alloc=0\npool-misses=2841\ncur-stdalloc=214\nmax-stdalloc=220\n

                  Here, as the cold count was 0 by default, cur-stdalloc indicates the number of dict_t objects that were allocated from the heap using mem_get() and are yet to be freed using mem_put(). After running the test case (name self-heal of 100 files), there was a rise in the cur-stdalloc value (from 14 to 214) for dict_t.

                  After the leaks were fixed, glusterfs was again compiled with -DDEBUG flags and the steps were repeated. Statedumps of the FUSE mount were taken before and after executing the test case to ascertain the validity of the fix. And the results were as follows:

                  Statedump output of the fuse mount process before executing the test case:

                  pool-name=glusterfs:dict_t\nhot-count=0\ncold-count=0\npadded_sizeof=140\nalloc-count=33\nmax-alloc=0\npool-misses=33\ncur-stdalloc=14\nmax-stdalloc=18\n

                  Statedump output of the fuse mount process after executing the test case:

                  pool-name=glusterfs:dict_t\nhot-count=0\ncold-count=0\npadded_sizeof=140\nalloc-count=2837\nmax-alloc=0\npool-misses=2837\ncur-stdalloc=14\nmax-stdalloc=119\n

                  The value of cur-stdalloc remained 14 after the test, indicating that the fix indeed does what it's supposed to do.

                  "},{"location":"Troubleshooting/statedump/#hangs-caused-by-frame-loss","title":"Hangs caused by frame loss","text":"

                  BZ 994959 reported that the Fuse mount hangs on a readdirp operation. Here are the steps used to locate the cause of the hang using statedump.

                  Statedumps were taken for all gluster processes after reproducing the issue. The following stack was seen in the FUSE mount's statedump:

                  [global.callpool.stack.1.frame.1]\nref_count=1\ntranslator=fuse\ncomplete=0\n\n[global.callpool.stack.1.frame.2]\nref_count=0\ntranslator=r2-client-1\ncomplete=1 <<----- Client xlator has completed the readdirp call and unwound to afr\nparent=r2-replicate-0\nwind_from=afr_do_readdir\nwind_to=children[call_child]->fops->readdirp\nunwind_from=client3_3_readdirp_cbk\nunwind_to=afr_readdirp_cbk\n\n[global.callpool.stack.1.frame.3]\nref_count=0\ntranslator=r2-replicate-0\ncomplete=0 <<---- But the Afr xlator is not unwinding for some reason.\nparent=r2-dht\nwind_from=dht_do_readdir\nwind_to=xvol->fops->readdirp\nunwind_to=dht_readdirp_cbk\n\n[global.callpool.stack.1.frame.4]\nref_count=1\ntranslator=r2-dht\ncomplete=0\nparent=r2-io-cache\nwind_from=ioc_readdirp\nwind_to=FIRST_CHILD(this)->fops->readdirp\nunwind_to=ioc_readdirp_cbk\n\n[global.callpool.stack.1.frame.5]\nref_count=1\ntranslator=r2-io-cache\ncomplete=0\nparent=r2-quick-read\nwind_from=qr_readdirp\nwind_to=FIRST_CHILD (this)->fops->readdirp\nunwind_to=qr_readdirp_cbk\n

                  unwind_to shows that the call was unwound to afr_readdirp_cbk from the r2-client-1 xlator. Inspecting that function revealed that afr was not unwinding the stack when the FOP failed. See http://review.gluster.org/5531 for more information about the patch/code changes.

                  "},{"location":"Troubleshooting/troubleshooting-afr/","title":"Troubleshooting Self-heal","text":"

                  The first level of analysis always starts with looking at the log files. Which ones, you ask?

                  • /var/log/glusterfs/$fuse-mount-point.log \u2013> Fuse client log
                  • /var/log/glusterfs/glfsheal-$volname.log \u2013> This is the log file to look at when you run the heal info/split-brain resolution commands.
                  • /var/log/glusterfs/glustershd.log \u2013> This is the self-heal daemon log that prints the names of files undergoing heal, the sources and sinks for each file etc. It is common for all volumes.
                  • /var/log/glusterfs/bricks/$brick.log \u2013> Some errors in clients are simply propagated from the bricks themselves, so correlating client log errors with the logs from the brick is necessary.

                  Sometimes, you might need more verbose logging to figure out what\u2019s going on: gluster volume set $volname client-log-level $LEVEL

                  where LEVEL can be any one of DEBUG, WARNING, ERROR, INFO, CRITICAL, NONE, TRACE. This should ideally make all the log files mentioned above start logging at $LEVEL. The default is INFO, but you can temporarily toggle it to DEBUG or TRACE if you want to see under-the-hood messages. This is useful when the normal logs don\u2019t give a clue as to what is happening.
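
                  For example, a minimal sketch of bumping the client log level temporarily and reverting it afterwards, assuming a hypothetical volume named testvol:

                  gluster volume set testvol client-log-level DEBUG\n# reproduce the problem, collect the logs, then revert to the default\ngluster volume reset testvol client-log-level\n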

                  "},{"location":"Troubleshooting/troubleshooting-afr/#heal-related-issues","title":"Heal related issues:","text":"

                  Most issues I\u2019ve seen on the mailing list and with customers can broadly fit into the following buckets:

                  (Note: Not discussing split-brains here. If they occur, you need to use split-brain resolution CLI or cluster.favorite-child-policy options to fix them. They usually occur in replica 2 volumes and can be prevented by using replica 3 or arbiter volumes.)

                  "},{"location":"Troubleshooting/troubleshooting-afr/#i-heal-info-appears-to-hangtakes-a-long-time-to-complete","title":"i) Heal info appears to hang/takes a long time to complete","text":"

                  If the number of entries is large, then heal info will take longer than usual. While there are performance improvements to heal info being planned, a faster way to get an approximate count of the pending entries is to use the gluster volume heal $VOLNAME statistics heal-count command.
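
                  For example, on a hypothetical volume named testvol:

                  gluster volume heal testvol statistics heal-count\n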

                  Knowledge Hack: Since we know that during the write transaction, the xattrop folder will capture the gfid-string of the file if it needs heal, we can also do an ls /brick/.glusterfs/indices/xattrop|wc -l on each brick to get the approximate number of entries that need heal. If this number reduces over time, it is a sign that the heal backlog is reducing. You will also see messages whenever a particular type of heal starts/ends for a given gfid, like so:

                  [2019-05-07 12:05:14.460442] I [MSGID: 108026] [afr-self-heal-entry.c:883:afr_selfheal_entry_do] 0-testvol-replicate-0: performing entry selfheal on d120c0cf-6e87-454b-965b-0d83a4c752bb\n\n[2019-05-07 12:05:14.474710] I [MSGID: 108026] [afr-self-heal-common.c:1741:afr_log_selfheal] 0-testvol-replicate-0: Completed entry selfheal on d120c0cf-6e87-454b-965b-0d83a4c752bb. sources=[0] 2 sinks=1\n\n[2019-05-07 12:05:14.493506] I [MSGID: 108026] [afr-self-heal-common.c:1741:afr_log_selfheal] 0-testvol-replicate-0: Completed data selfheal on a9b5f183-21eb-4fb3-a342-287d3a7dddc5. sources=[0] 2 sinks=1\n\n[2019-05-07 12:05:14.494577] I [MSGID: 108026] [afr-self-heal-metadata.c:52:__afr_selfheal_metadata_do] 0-testvol-replicate-0: performing metadata selfheal on a9b5f183-21eb-4fb3-a342-287d3a7dddc5\n\n[2019-05-07 12:05:14.498398] I [MSGID: 108026] [afr-self-heal-common.c:1741:afr_log_selfheal] 0-testvol-replicate-0: Completed metadata selfheal on a9b5f183-21eb-4fb3-a342-287d3a7dddc5. sources=[0] 2 sinks=1\n
                  "},{"location":"Troubleshooting/troubleshooting-afr/#ii-self-heal-is-stuck-not-getting-completed","title":"ii) Self-heal is stuck/ not getting completed.","text":"

                  If a file seems to be forever appearing in heal info and not healing, check the following:

                  • Examine the afr xattrs: do they clearly indicate the good and bad copies? If there isn\u2019t at least one good copy, then the file is in split-brain and you would need to use the split-brain resolution CLI.
                  • Identify which node\u2019s shds would be picking up the file for heal. If a file is listed in the heal info output under brick1 and brick2, then the shds on the nodes which host those bricks would attempt the heal (and one of them would succeed).
                  • Once the shd is identified, look at the shd logs to see if it is indeed connected to the bricks.

                  This is good:

                  [2019-05-07 09:53:02.912923] I [MSGID: 114046] [client-handshake.c:1106:client_setvolume_cbk] 0-testvol-client-2: Connected to testvol-client-2, attached to remote volume '/bricks/brick3'\n

                  This indicates a disconnect:

                  [2019-05-07 11:44:47.602862] I [MSGID: 114018] [client.c:2334:client_rpc_notify] 0-testvol-client-2: disconnected from testvol-client-2. Client process will keep trying to connect to glusterd until brick's port is available\n\n[2019-05-07 11:44:50.953516] E [MSGID: 114058] [client-handshake.c:1456:client_query_portmap_cbk] 0-testvol-client-2: failed to get the port number for remote subvolume. Please run 'gluster volume status' on server to see if brick process is running.\n

                  Alternatively, take a statedump of the self-heal daemon (shd) and check if all client xlators are connected to the respective bricks. The shd must have connected=1 for all the client xlators, meaning it can talk to all the bricks.

                  Shd\u2019s statedump entry of a client xlator that is connected to the 3rd brick:

                  [xlator.protocol.client.testvol-client-2.priv]\nconnected=1\ntotal_bytes_read=75004\nping_timeout=42\ntotal_bytes_written=50608\nping_msgs_sent=0\nmsgs_sent=0\n

                  Shd\u2019s statedump entry of the same client xlator if it is disconnected from the 3rd brick:

                  [xlator.protocol.client.testvol-client-2.priv]\nconnected=0\ntotal_bytes_read=75004\nping_timeout=42\ntotal_bytes_written=50608\nping_msgs_sent=0\nmsgs_sent=0\n

                  If there are connection issues (i.e. connected=0), you would need to investigate and fix them. Check if the pid and the TCP/RDMA port of the brick process from gluster volume status $VOLNAME match those of ps aux|grep glusterfsd|grep $brick-path.

                  # gluster volume status\nStatus of volume: testvol\nGluster process TCP Port RDMA Port Online Pid\n\n---\n\nBrick 127.0.0.2:/bricks/brick1 49152 0 Y 12527\n
                  # ps aux|grep brick1\n\nroot 12527 0.0 0.1 1459208 20104 ? Ssl 11:20 0:01 /usr/local/sbin/glusterfsd -s 127.0.0.2 --volfile-id testvol.127.0.0.2.bricks-brick1 -p /var/run/gluster/vols/testvol/127.0.0.2-bricks-brick1.pid -S /var/run/gluster/70529980362a17d6.socket --brick-name /bricks/brick1 -l /var/log/glusterfs/bricks/bricks-brick1.log --xlator-option *-posix.glusterd-uuid=d90b1532-30e5-4f9d-a75b-3ebb1c3682d4 --process-name brick --brick-port 49152 --xlator-option testvol-server.listen-port=49152\n

                  Though this will likely match, sometimes there could be a bug leading to stale port usage. A quick workaround would be to restart glusterd on that node and check if things match. Report the issue to the devs if you see this problem.

                  • I have seen some cases where a file is listed in heal info, and the afr xattrs indicate pending metadata or data heal but the file itself is not present on all bricks. Ideally, the parent directory of the file must have pending entry heal xattrs so that the file either gets created on the missing bricks or gets deleted from the ones where it is present. But if the parent dir doesn\u2019t have xattrs, the entry heal can\u2019t proceed. In such cases, you can

                    • Either do a lookup directly on the file from the mount so that name heal is triggered and then the shd can pick up the data/metadata heal.
                    • Or manually set entry xattrs on the parent dir to emulate an entry heal so that the file gets created as a part of it.
                    • If a brick\u2019s underlying filesystem/lvm was damaged and fsck\u2019d to recover it, some files/dirs might be missing on it. If there is a lot of missing info on the recovered bricks, it might be better to just do a replace-brick or reset-brick and let the heal fully sync everything rather than fiddling with the afr xattrs of individual entries.

                  Hack: How to trigger heal on any file/directory. Knowing about the self-heal logic and index heal from the previous post, we can sort of emulate a heal with the following steps. This is not something that you should be doing on your cluster, but it pays to at least know that it is possible when push comes to shove.

                  1. Pick one brick as good and set the afr pending xattrs on it, blaming the bad bricks.
                  2. Capture the gfid inside .glusterfs/indices/xattrop so that the shd can pick it up during index heal.
                  3. Finally, trigger index heal: gluster volume heal $VOLNAME .

                  Example: Let us say FILE-1 exists with trusted.gfid=0x1ad2144928124da9b7117d27393fea5c on all bricks of a replica 3 volume called testvol. It has no afr xattrs, but you still need to emulate a heal. Let us say you choose brick-2 as the source. Let us do the steps listed above:

                  1. Make brick-2 blame the other 2 bricks:

                    setfattr -n trusted.afr.testvol-client-2 -v 0x000000010000000000000000 /bricks/brick2/FILE-1\nsetfattr -n trusted.afr.testvol-client-1 -v 0x000000010000000000000000 /bricks/brick2/FILE-1\n
                  2. Store the gfid string inside xattrop folder as a hardlink to the base entry:

                    # cd /bricks/brick2/.glusterfs/indices/xattrop/\n# ls -li\ntotal 0\n17829255 ----------. 1 root root 0 May 10 11:20 xattrop-a400ca91-cec9-4463-a183-aca9eaff9fa7\n\n# ln xattrop-a400ca91-cec9-4463-a183-aca9eaff9fa7 1ad21449-2812-4da9-b711-7d27393fea5c\n# ll\ntotal 0\n----------. 2 root root 0 May 10 11:20 1ad21449-2812-4da9-b711-7d27393fea5c\n----------. 2 root root 0 May 10 11:20 xattrop-a400ca91-cec9-4463-a183-aca9eaff9fa7\n
                  3. Trigger heal: gluster volume heal testvol

                    The glustershd.log of node-2 should log about the heal.

                    [2019-05-10 06:10:46.027238] I [MSGID: 108026] [afr-self-heal-common.c:1741:afr_log_selfheal] 0-testvol-replicate-0: Completed data selfheal on 1ad21449-2812-4da9-b711-7d27393fea5c. sources=[1] sinks=0 2\n

                    So the data was healed from the second brick to the first and third bricks.
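
                    To double-check, you can dump the xattrs on the brick again (same hypothetical brick path as above) and verify that the trusted.afr values set in step 1 have been reset to all zeroes:

                    getfattr -d -m . -e hex /bricks/brick2/FILE-1\n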

                  "},{"location":"Troubleshooting/troubleshooting-afr/#iii-self-heal-is-too-slow","title":"iii) Self-heal is too slow","text":"

                  If the heal backlog is decreasing and you see glustershd logging heals but you\u2019re not happy with the rate of healing, then you can play around with shd-max-threads and shd-wait-qlength volume options.

                  Option: cluster.shd-max-threads\nDefault Value: 1\nDescription: Maximum number of parallel heals SHD can do per local brick. This can substantially lower heal times, but can also crush your bricks if you don\u2019t have the storage hardware to support this.\n\nOption: cluster.shd-wait-qlength\nDefault Value: 1024\nDescription: This option can be used to control number of heals that can wait in SHD per subvolume\n
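
                  For example, to raise both on a hypothetical volume named testvol; the values shown here are only illustrative, so pick them based on what your storage hardware can sustain:

                  gluster volume set testvol cluster.shd-max-threads 4\ngluster volume set testvol cluster.shd-wait-qlength 2048\n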

                  I\u2019m not covering it here but it is possible to launch multiple shd instances (and kill them later on) on your node for increasing heal throughput. It is documented at https://access.redhat.com/solutions/3794011.

                  "},{"location":"Troubleshooting/troubleshooting-afr/#iv-self-heal-is-too-aggressive-and-slows-down-the-system","title":"iv) Self-heal is too aggressive and slows down the system.","text":"

                  If shd-max-threads is at the lowest value (i.e. 1) and the CPU usage of the bricks is too high, check if the volume\u2019s profile info shows a lot of RCHECKSUM fops. Data self-heal does checksum calculation (i.e. the posix_rchecksum() FOP) which can be CPU intensive. You can set the cluster.data-self-heal-algorithm option to full. This does a full file copy instead of computing rolling checksums and syncing only the mismatching blocks. The tradeoff is that network consumption will increase.
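
                  A rough sketch of this check-and-change sequence, assuming a hypothetical volume named testvol (profiling must be started before info can be queried):

                  gluster volume profile testvol start\ngluster volume profile testvol info incremental | grep -i rchecksum\ngluster volume set testvol cluster.data-self-heal-algorithm full\n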

                  You can also disable all client-side heals if they are turned on, so that the client bandwidth is consumed entirely by the application FOPs and not by client-side background heals, i.e. turn off cluster.metadata-self-heal, cluster.data-self-heal and cluster.entry-self-heal. Note: In recent versions of gluster, client-side heals are disabled by default.
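
                  If you decide to do this, the three options can be turned off like so (again assuming a hypothetical volume named testvol; skip this on versions where they are already off by default):

                  gluster volume set testvol cluster.metadata-self-heal off\ngluster volume set testvol cluster.data-self-heal off\ngluster volume set testvol cluster.entry-self-heal off\n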

                  "},{"location":"Troubleshooting/troubleshooting-afr/#mount-related-issues","title":"Mount related issues:","text":""},{"location":"Troubleshooting/troubleshooting-afr/#i-all-fops-are-failing-with-enotconn","title":"i) All fops are failing with ENOTCONN","text":"

                  Check the mount log/statedump for loss of quorum, just like for glustershd. If this is a fuse client (as opposed to an nfs/gfapi client), you can also check the .meta folder to verify the connection status to the bricks.

                  # cat /mnt/fuse_mnt/.meta/graphs/active/testvol-client-*/private |grep connected\n\nconnected = 0\nconnected = 1\nconnected = 1\n

                  If connected=0, the connection to that brick is lost. Find out why. If the client is not connected to a quorum number of bricks, then AFR fails lookups (and therefore any subsequent FOP) with 'Transport endpoint is not connected'.

                  "},{"location":"Troubleshooting/troubleshooting-afr/#ii-fops-on-some-files-are-failing-with-enotconn","title":"ii) FOPs on some files are failing with ENOTCONN","text":"

                  Check mount log for the file being unreadable:

                  [2019-05-10 11:04:01.607046] W [MSGID: 108027] [afr-common.c:2268:afr_attempt_readsubvol_set] 13-testvol-replicate-0: no read subvols for /FILE.txt\n[2019-05-10 11:04:01.607775] W [fuse-bridge.c:939:fuse_entry_cbk] 0-glusterfs-fuse: 234: LOOKUP() /FILE.txt => -1 (Transport endpoint is not connected)\n

                  This means there was only 1 good copy and the client has lost connection to that brick. You need to ensure that the client is connected to all bricks.

                  "},{"location":"Troubleshooting/troubleshooting-afr/#iii-mount-is-hung","title":"iii) Mount is hung","text":"

                  It can be difficult to pin-point the issue immediately and it might require assistance from the developers, but the first steps in debugging could be to:

                  • strace the fuse mount; see where it is hung.
                  • Take a statedump of the mount to see which xlator has frames that are not wound (i.e. complete=0) and for which FOP. Then check the source code to see if there are any unhandled cases where the xlator doesn\u2019t wind the FOP to its child.
                  • Take statedump of bricks to see if there are any stale locks. An indication of stale locks is the same lock being present in multiple statedumps or the \u2018granted\u2019 date being very old.

                  Excerpt from a brick statedump:

                  [xlator.features.locks.testvol-locks.inode]\npath=/FILE\nmandatory=0\ninodelk-count=1\nlock-dump.domain.domain=testvol-replicate-0:self-heal\nlock-dump.domain.domain=testvol-replicate-0\ninodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=0, len=0,\npid = 18446744073709551610, owner=700a0060037f0000, client=0x7fc57c09c1c0,\nconnection-id=vm1-17902-2018/10/14-07:18:17:132969-testvol-client-0-0-0, granted at 2018-10-14 07:18:40\n

                  While stale lock issues are candidates for bug reports, the locks xlator on the brick releases locks held by a particular client upon a network disconnect. That can be used as a workaround to release the stale locks, i.e. restart the brick, restart the client, or induce a network disconnect between them.

                  "},{"location":"Troubleshooting/troubleshooting-filelocks/","title":"Troubleshooting File Locks","text":"

                  Use statedumps to find and list the locks held on files. The statedump output also provides information on each lock with its range, basename, PID of the application holding the lock, and so on. You can analyze the output to identify locks whose owner/application is no longer running or interested in them. After ensuring that no application is using the file, you can clear the lock using the following clear-locks commands.

                  1. Perform statedump on the volume to view the files that are locked using the following command:

                    gluster volume statedump <VOLNAME> inode\n

                    For example, to display statedump of test-volume:

                    gluster volume statedump test-volume\nVolume statedump successful\n

                    The statedump files are created on the brick servers in the /tmp directory or in the directory set using the server.statedump-path volume option. The naming convention of the dump file is <brick-path>.<brick-pid>.dump.

                    The following are the sample contents of the statedump file. It indicates that GlusterFS has entered into a state where there is an entry lock (entrylk) and an inode lock (inodelk). Ensure that those are stale locks and no resources own them.

                    [xlator.features.locks.vol-locks.inode]\npath=/\nmandatory=0\nentrylk-count=1\nlock-dump.domain.domain=vol-replicate-0\nxlator.feature.locks.lock-dump.domain.entrylk.entrylk[0](ACTIVE)=type=ENTRYLK_WRLCK on basename=file1, pid = 714782904, owner=ffffff2a3c7f0000, transport=0x20e0670, , granted at Mon Feb 27 16:01:01 2012\n\nconn.2.bound_xl./gfs/brick1.hashsize=14057\nconn.2.bound_xl./gfs/brick1.name=/gfs/brick1/inode\nconn.2.bound_xl./gfs/brick1.lru_limit=16384\nconn.2.bound_xl./gfs/brick1.active_size=2\nconn.2.bound_xl./gfs/brick1.lru_size=0\nconn.2.bound_xl./gfs/brick1.purge_size=0\n\n[conn.2.bound_xl./gfs/brick1.active.1]\ngfid=538a3d4a-01b0-4d03-9dc9-843cd8704d07\nnlookup=1\nref=2\nia_type=1\n[xlator.features.locks.vol-locks.inode]\npath=/file1\nmandatory=0\ninodelk-count=1\nlock-dump.domain.domain=vol-replicate-0\ninodelk.inodelk[0](ACTIVE)=type=WRITE, whence=0, start=0, len=0, pid = 714787072, owner=00ffff2a3c7f0000, transport=0x20e0670, , granted at Mon Feb 27 16:01:01 2012\n
                  2. Clear the lock using the following command:

                    gluster volume clear-locks <VOLNAME> <path> kind {blocked|granted|all} {inode|entry|posix} [range]\n

                    For example, to clear the entry lock on file1 of test-volume:

                    gluster volume clear-locks test-volume / kind granted entry file1\nVolume clear-locks successful\nvol-locks: entry blocked locks=0 granted locks=1\n
                  3. Clear the inode lock using the following command:

                    gluster volume clear-locks <VOLNAME> <path> kind {blocked|granted|all} {inode|entry|posix} [range]\n

                    For example, to clear the inode lock on file1 of test-volume:

                    gluster  volume clear-locks test-volume /file1 kind granted inode 0,0-0\nVolume clear-locks successful\nvol-locks: inode blocked locks=0 granted locks=1\n

                    Perform statedump on test-volume again to verify that the above inode and entry locks are cleared.

                  "},{"location":"Troubleshooting/troubleshooting-georep/","title":"Troubleshooting Geo-replication","text":""},{"location":"Troubleshooting/troubleshooting-georep/#troubleshooting-geo-replication","title":"Troubleshooting Geo-replication","text":"

                  This section describes the most common troubleshooting scenarios related to GlusterFS Geo-replication.

                  "},{"location":"Troubleshooting/troubleshooting-georep/#locating-log-files","title":"Locating Log Files","text":"

                  For every Geo-replication session, the following three log files are associated with it (four, if the secondary is a gluster volume):

                  • Primary-log-file - log file for the process which monitors the Primary volume
                  • Secondary-log-file - log file for process which initiates the changes in secondary
                  • Primary-gluster-log-file - log file for the maintenance mount point that Geo-replication module uses to monitor the Primary volume
                  • Secondary-gluster-log-file - is the secondary's counterpart of it

                  Primary Log File

                  To get the Primary-log-file for geo-replication, use the following command:

                  gluster volume geo-replication <session> config log-file\n

                  For example:

                  gluster volume geo-replication Volume1 example.com:/data/remote_dir config log-file\n

                  Secondary Log File

                  To get the log file for geo-replication on secondary (glusterd must be running on secondary machine), use the following commands:

                  1. On primary, run the following command:

                    gluster volume geo-replication Volume1 example.com:/data/remote_dir config session-owner 5f6e5200-756f-11e0-a1f0-0800200c9a66\n

                    Displays the session owner details.

                  2. On secondary, run the following command:

                    gluster volume geo-replication /data/remote_dir config log-file /var/log/gluster/${session-owner}:remote-mirror.log\n
                  3. Substitute the session owner details (output of Step 1) into the output of Step 2 to get the location of the log file.

                    /var/log/gluster/5f6e5200-756f-11e0-a1f0-0800200c9a66:remote-mirror.log\n
                  "},{"location":"Troubleshooting/troubleshooting-georep/#rotating-geo-replication-logs","title":"Rotating Geo-replication Logs","text":"

                  Administrators can rotate the log file of a particular primary-secondary session, as needed. When you run geo-replication's log-rotate command, the log file is backed up with the current timestamp suffixed to the file name, and a signal is sent to gsyncd to start logging to a new log file.

                  To rotate a geo-replication log file

                  • Rotate the log file for a particular primary-secondary session using the following command:
                    gluster volume geo-replication <primary_volume> <secondary> log-rotate\n

                  For example, to rotate the log file of primary Volume1 and secondary example.com:/data/remote_dir :

                      gluster volume geo-replication Volume1 example.com:/data/remote_dir log rotate\n    log rotate successful\n
                  • Rotate the log file for all sessions of a primary volume using the following command:
                    gluster volume geo-replication <primary_volume> log-rotate\n

                  For example, to rotate the log file of primary Volume1:

                      gluster volume geo-replication Volume1 log rotate\n    log rotate successful\n
                  • Rotate the log file for all sessions using the following command:
                    gluster volume geo-replication log-rotate\n

                  For example, to rotate the log file for all sessions:

                      gluster volume geo-replication log rotate\n    log rotate successful\n
                  "},{"location":"Troubleshooting/troubleshooting-georep/#synchronization-is-not-complete","title":"Synchronization is not complete","text":"

                  Description: GlusterFS geo-replication did not synchronize the data completely but the geo-replication status displayed is OK.

                  Solution: You can enforce a full sync of the data by erasing the index and restarting GlusterFS geo-replication. After restarting, GlusterFS geo-replication begins synchronizing all the data. All files are compared using checksum, which can be a lengthy and resource-intensive operation on large data sets.

                  "},{"location":"Troubleshooting/troubleshooting-georep/#issues-in-data-synchronization","title":"Issues in Data Synchronization","text":"

                  Description: Geo-replication displays the status as OK, but the files do not get synced; only directories and symlinks get synced, with the following error message in the log:

                  [2011-05-02 13:42:13.467644] E [primary:288:regjob] GMaster: failed to sync ./some\\_file\\`\n

                  Solution: Geo-replication invokes rsync v3.0.0 or higher on the host and the remote machine. Verify that the required version is installed on both.

                  "},{"location":"Troubleshooting/troubleshooting-georep/#geo-replication-status-displays-faulty-very-often","title":"Geo-replication status displays Faulty very often","text":"

                  Description: Geo-replication displays status as faulty very often with a backtrace similar to the following:

                  2011-04-28 14:06:18.378859] E [syncdutils:131:log\\_raise\\_exception]\n\\<top\\>: FAIL: Traceback (most recent call last): File\n\"/usr/local/libexec/glusterfs/python/syncdaemon/syncdutils.py\", line\n152, in twraptf(\\*aa) File\n\"/usr/local/libexec/glusterfs/python/syncdaemon/repce.py\", line 118, in\nlisten rid, exc, res = recv(self.inf) File\n\"/usr/local/libexec/glusterfs/python/syncdaemon/repce.py\", line 42, in\nrecv return pickle.load(inf) EOFError\n

                  Solution: This error indicates that the RPC communication between the primary gsyncd module and the secondary gsyncd module is broken, which can happen for various reasons. Check that all the following prerequisites are satisfied:

                  • Password-less SSH is set up properly between the host and the remote machine.
                  • FUSE is installed on the machine, because the geo-replication module mounts the GlusterFS volume using FUSE to sync data.
                  • If the Secondary is a volume, check if that volume is started.
                  • If the Secondary is a plain directory, verify if the directory has been created already with the required permissions.
                  • If GlusterFS 3.2 or higher is not installed in the default location (in the Primary) and has instead been installed under a custom prefix, configure the gluster-command option to point to the exact location.
                  • If GlusterFS 3.2 or higher is not installed in the default location (in the Secondary) and has instead been installed under a custom prefix, configure the remote-gsyncd-command option to point to the exact place where gsyncd is located.
                  "},{"location":"Troubleshooting/troubleshooting-georep/#intermediate-primary-goes-to-faulty-state","title":"Intermediate Primary goes to Faulty State","text":"

                  Description: In a cascading set-up, the intermediate primary goes to faulty state with the following log:

                  raise RuntimeError (\"aborting on uuid change from %s to %s\" % \\\\\nRuntimeError: aborting on uuid change from af07e07c-427f-4586-ab9f-\n4bf7d299be81 to de6b5040-8f4e-4575-8831-c4f55bd41154\n

                  Solution: In a cascading set-up, the intermediate primary is loyal to the original primary. The above log means that the geo-replication module has detected a change in the original primary. If this is the desired behavior, delete the config option volume-id in the session initiated from the intermediate primary.

                  "},{"location":"Troubleshooting/troubleshooting-glusterd/","title":"Troubleshooting CLI and glusterd","text":""},{"location":"Troubleshooting/troubleshooting-glusterd/#troubleshooting-the-gluster-cli-and-glusterd","title":"Troubleshooting the gluster CLI and glusterd","text":"

                  The glusterd daemon runs on every trusted server node and is responsible for the management of the trusted pool and volumes.

                  The gluster CLI sends commands to the glusterd daemon on the local node, which executes the operation and returns the result to the user.

                  "},{"location":"Troubleshooting/troubleshooting-glusterd/#debugging-glusterd","title":"Debugging glusterd","text":""},{"location":"Troubleshooting/troubleshooting-glusterd/#logs","title":"Logs","text":"

                  Start by looking at the log files for clues as to what went wrong when you hit a problem. The default directory for Gluster logs is /var/log/glusterfs. The logs for the CLI and glusterd are:

                  • glusterd : /var/log/glusterfs/glusterd.log
                  • gluster CLI : /var/log/glusterfs/cli.log
                  "},{"location":"Troubleshooting/troubleshooting-glusterd/#statedumps","title":"Statedumps","text":"

                  Statedumps are useful in debugging memory leaks and hangs. See Statedump for more details.

                  "},{"location":"Troubleshooting/troubleshooting-glusterd/#common-issues-and-how-to-resolve-them","title":"Common Issues and How to Resolve Them","text":"

                  \"Another transaction is in progress for volname\" or \"Locking failed on xxx.xxx.xxx.xxx\"

                  As Gluster is distributed by nature, glusterd takes locks when performing operations to ensure that configuration changes made to a volume are atomic across the cluster. These errors are returned when:

                  • More than one transaction contends on the same lock.

                  Solution : These are likely to be transient errors and the operation will succeed if retried once the other transaction is complete.

                  • A stale lock exists on one of the nodes.

                  Solution : Repeating the operation will not help until the stale lock is cleaned up. Restart the glusterd process holding the lock (see the sketch after the steps below):

                  • Check the glusterd.log file to find out which node holds the stale lock. Look for the message: lock being held by <uuid>
                  • Run gluster peer status to identify the node with the uuid in the log message.
                  • Restart glusterd on that node.
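
                  The first two steps can be done from the shell, for example (the log path is the one listed in the Logs section above; the UUID placeholder is whatever the log message reports):

                  grep 'lock being held by' /var/log/glusterfs/glusterd.log\ngluster peer status | grep -B1 <uuid-from-log>\n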

                  \"Transport endpoint is not connected\" errors but all bricks are up

                  This is usually seen when a brick process does not shut down cleanly, leaving stale data behind in the glusterd process. Gluster client processes query glusterd for the ports the brick processes are listening on and attempt to connect to those ports. If the port information in glusterd is incorrect, the client will fail to connect to the brick even though it is up. Operations which need to access that brick may fail with \"Transport endpoint is not connected\".

                  Solution : Restart the glusterd service.

                  \"Peer Rejected\"

                  gluster peer status returns \"Peer Rejected\" for a node.

                  Hostname: <hostname>\nUuid: <xxxx-xxx-xxxx>\nState: Peer Rejected (Connected)\n

                  This indicates that the volume configuration on the node is not in sync with the rest of the trusted storage pool. You should see the following message in the glusterd log for the node on which the peer status command was run:

                  Version of Cksums <vol-name> differ. local cksum = xxxxxx, remote cksum = xxxxyx on peer <hostname>\n

                  Solution: Update the cluster.op-version

                  • Run gluster volume get all cluster.max-op-version to get the latest supported op-version.
                  • Update the cluster.op-version to the latest supported op-version by executing gluster volume set all cluster.op-version <op-version>.

                  \"Accepted Peer Request\"

                  If the glusterd handshake fails while expanding a cluster, the view of the cluster will be inconsistent. The state of the peer in gluster peer status will be \u201caccepted peer request\u201d and subsequent CLI commands will fail with an error. For example, the volume create command will fail with \"volume create: testvol: failed: Host <hostname> is not in 'Peer in Cluster' state\".

                  In this case the value of the state field in /var/lib/glusterd/peers/<UUID> will be other than 3.

                  Solution (a scripted sketch follows these steps):

                  • Stop glusterd
                  • Open /var/lib/glusterd/peers/<UUID>
                  • Change state to 3
                  • Start glusterd
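
                  Put together, on the rejected node, the fix might look like the following sketch; <UUID> stays a placeholder for the actual peer file name, and editing the file by hand works just as well as the sed shown here:

                  systemctl stop glusterd\nsed -i 's/^state=.*/state=3/' /var/lib/glusterd/peers/<UUID>\nsystemctl start glusterd\n
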
                  "},{"location":"Troubleshooting/troubleshooting-gnfs/","title":"Troubleshooting gNFS","text":""},{"location":"Troubleshooting/troubleshooting-gnfs/#troubleshooting-gluster-nfs","title":"Troubleshooting Gluster NFS","text":"

                  This section describes the most common troubleshooting issues related to NFS.

                  "},{"location":"Troubleshooting/troubleshooting-gnfs/#mount-command-on-nfs-client-fails-with-rpc-error-program-not-registered","title":"mount command on NFS client fails with \u201cRPC Error: Program not registered\u201d","text":"

                  Start portmap or rpcbind service on the NFS server.

                  This error is encountered when the server has not started correctly. On most Linux distributions this is fixed by starting portmap:

                  /etc/init.d/portmap start\n

                  On some distributions where portmap has been replaced by rpcbind, the following command is required:

                  /etc/init.d/rpcbind start\n

                  After starting portmap or rpcbind, gluster NFS server needs to be restarted.

                  "},{"location":"Troubleshooting/troubleshooting-gnfs/#nfs-server-start-up-fails-with-port-is-already-in-use-error-in-the-log-file","title":"NFS server start-up fails with \u201cPort is already in use\u201d error in the log file.","text":"

                  Another Gluster NFS server is running on the same machine.

                  This error can arise if there is already a Gluster NFS server running on the same machine. This situation can be confirmed from the log file, if the following error lines exist:

                  [2010-05-26 23:40:49] E [rpc-socket.c:126:rpcsvc_socket_listen] rpc-socket: binding socket failed:Address already in use\n[2010-05-26 23:40:49] E [rpc-socket.c:129:rpcsvc_socket_listen] rpc-socket: Port is already in use\n[2010-05-26 23:40:49] E [rpcsvc.c:2636:rpcsvc_stage_program_register] rpc-service: could not create listening connection\n[2010-05-26 23:40:49] E [rpcsvc.c:2675:rpcsvc_program_register] rpc-service: stage registration of program failed\n[2010-05-26 23:40:49] E [rpcsvc.c:2695:rpcsvc_program_register] rpc-service: Program registration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465\n[2010-05-26 23:40:49] E [nfs.c:125:nfs_init_versions] nfs: Program init failed\n[2010-05-26 23:40:49] C [nfs.c:531:notify] nfs: Failed to initialize protocols\n

                  To resolve this error, one of the Gluster NFS servers will have to be shut down. At this time, Gluster NFS server does not support running multiple NFS servers on the same machine.

                  "},{"location":"Troubleshooting/troubleshooting-gnfs/#mount-command-fails-with-rpcstatd-related-error-message","title":"mount command fails with \u201crpc.statd\u201d related error message","text":"

                  If the mount command fails with the following error message:

                  mount.nfs: rpc.statd is not running but is required for remote locking.\nmount.nfs: Either use '-o nolock' to keep locks local, or start statd.\n

                  For NFS clients to mount the NFS server, rpc.statd service must be running on the clients. Start rpc.statd service by running the following command:

                  rpc.statd\n
                  "},{"location":"Troubleshooting/troubleshooting-gnfs/#mount-command-takes-too-long-to-finish","title":"mount command takes too long to finish.","text":"

                  Start rpcbind service on the NFS client

                  The problem is that the rpcbind or portmap service is not running on the NFS client. The resolution for this is to start either of these services by running the following command:

                  /etc/init.d/portmap start\n

                  On some distributions where portmap has been replaced by rpcbind, the following command is required:

                  /etc/init.d/rpcbind start\n
                  "},{"location":"Troubleshooting/troubleshooting-gnfs/#nfs-server-glusterfsd-starts-but-initialization-fails-with-nfsrpc-service-portmap-registration-of-program-failed-error-message-in-the-log","title":"NFS server glusterfsd starts but initialization fails with \u201cnfsrpc- service: portmap registration of program failed\u201d error message in the log.","text":"

                  NFS start-up can succeed but the initialization of the NFS service can still fail preventing clients from accessing the mount points. Such a situation can be confirmed from the following error messages in the log file:

                  [2010-05-26 23:33:47] E [rpcsvc.c:2598:rpcsvc_program_register_portmap] rpc-service: Could notregister with portmap\n[2010-05-26 23:33:47] E [rpcsvc.c:2682:rpcsvc_program_register] rpc-service: portmap registration of program failed\n[2010-05-26 23:33:47] E [rpcsvc.c:2695:rpcsvc_program_register] rpc-service: Program registration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465\n[2010-05-26 23:33:47] E [nfs.c:125:nfs_init_versions] nfs: Program init failed\n[2010-05-26 23:33:47] C [nfs.c:531:notify] nfs: Failed to initialize protocols\n[2010-05-26 23:33:49] E [rpcsvc.c:2614:rpcsvc_program_unregister_portmap] rpc-service: Could not unregister with portmap\n[2010-05-26 23:33:49] E [rpcsvc.c:2731:rpcsvc_program_unregister] rpc-service: portmap unregistration of program failed\n[2010-05-26 23:33:49] E [rpcsvc.c:2744:rpcsvc_program_unregister] rpc-service: Program unregistration failed: MOUNT3, Num: 100005, Ver: 3, Port: 38465\n
                  1. Start portmap or rpcbind service on the NFS server

                    On most Linux distributions, portmap can be started using the following command:

                    /etc/init.d/portmap start\n

                    On some distributions where portmap has been replaced by rpcbind, run the following command:

                    /etc/init.d/rpcbind start\n

                    After starting portmap or rpcbind, gluster NFS server needs to be restarted.

                  2. Stop another NFS server running on the same machine

                    Such an error is also seen when there is another NFS server running on the same machine but it is not the Gluster NFS server. On Linux systems, this could be the kernel NFS server. Resolution involves stopping the other NFS server or not running the Gluster NFS server on the machine. Before stopping the kernel NFS server, ensure that no critical service depends on access to that NFS server's exports.

                    On Linux, kernel NFS servers can be stopped by using either of the following commands depending on the distribution in use:

                    /etc/init.d/nfs-kernel-server stop\n/etc/init.d/nfs stop\n
                  3. Restart Gluster NFS server

                  "},{"location":"Troubleshooting/troubleshooting-gnfs/#mount-command-fails-with-nfs-server-failed-error","title":"mount command fails with NFS server failed error.","text":"

                  mount command fails with following error

                  mount: mount to NFS server '10.1.10.11' failed: timed out (retrying).\n

                  Perform one of the following to resolve this issue:

                  1. Disable name lookup requests from NFS server to a DNS server

                    The NFS server attempts to authenticate NFS clients by performing a reverse DNS lookup to match hostnames in the volume file with the client IP addresses. There can be a situation where the NFS server either is not able to connect to the DNS server or the DNS server is taking too long to respond to DNS requests. These delays can result in delayed replies from the NFS server to the NFS client, resulting in the timeout error seen above.

                    NFS server provides a work-around that disables DNS requests, instead relying only on the client IP addresses for authentication. The following option can be added for successful mounting in such situations:

                    option rpc-auth.addr.namelookup off

                    Note: Remember that disabling name lookup forces the NFS server to authenticate clients using only IP addresses, and if the authentication rules in the volume file use hostnames, those authentication rules will fail and disallow mounting for those clients.

                    OR

                  2. NFS version used by the NFS client is other than version 3

                    Gluster NFS server supports version 3 of NFS protocol. In recent Linux kernels, the default NFS version has been changed from 3 to 4. It is possible that the client machine is unable to connect to the Gluster NFS server because it is using version 4 messages which are not understood by Gluster NFS server. The timeout can be resolved by forcing the NFS client to use version 3. The vers option to mount command is used for this purpose:

                    mount -o vers=3\n
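
                    For example, a full mount invocation might look like the following sketch; the server name, volume name and mount point are placeholders:

                    mount -t nfs -o vers=3 server1:/test-volume /mnt/nfs\n
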
                  "},{"location":"Troubleshooting/troubleshooting-gnfs/#showmount-fails-with-clnt_create-rpc-unable-to-receive","title":"showmount fails with clnt_create: RPC: Unable to receive","text":"

                  Check your firewall setting to open ports 111 for portmap requests/replies and Gluster NFS server requests/replies. Gluster NFS server operates over the following port numbers: 38465, 38466, and 38467.

                  "},{"location":"Troubleshooting/troubleshooting-gnfs/#application-fails-with-invalid-argument-or-value-too-large-for-defined-data-type-error","title":"Application fails with \"Invalid argument\" or \"Value too large for defined data type\" error.","text":"

                  These two errors generally happen for 32-bit NFS clients, or applications that do not support 64-bit inode numbers or large files. Use the following option from the CLI to make Gluster NFS return 32-bit inode numbers instead: nfs.enable-ino32 <on|off>
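
                  For example, to turn it on for a hypothetical volume named test-volume:

                  gluster volume set test-volume nfs.enable-ino32 on\n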

                  Applications that will benefit are those that were either:

                  • built 32-bit and run on 32-bit machines such that they do not support large files by default
                  • built 32-bit on 64-bit systems

                  This option is disabled by default so NFS returns 64-bit inode numbers by default.

                  For applications that can be rebuilt from source, it is recommended to rebuild them using the following flag with gcc:

                  -D_FILE_OFFSET_BITS=64\n
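
                  For example, a hypothetical single-file application could be rebuilt as follows:

                  gcc -D_FILE_OFFSET_BITS=64 -o myapp myapp.c\n
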
                  "},{"location":"Troubleshooting/troubleshooting-memory/","title":"Troubleshooting High Memory Utilization","text":"

                  If the memory utilization of a Gluster process increases significantly with time, it could be a leak caused by resources not being freed. If you suspect that you may have hit such an issue, try using statedumps to debug the issue.

                  If you are unable to figure out where the leak is, please file an issue and provide the following details:

                  • Gluster version
                  • The affected process
                  • The output of gluster volume info
                  • Steps to reproduce the issue if available
                  • Statedumps for the process collected at intervals as the memory utilization increases
                  • The Gluster log files for the process (if possible)
                  "},{"location":"Upgrade-Guide/","title":"Upgrade-Guide Index","text":""},{"location":"Upgrade-Guide/#upgrading-glusterfs","title":"Upgrading GlusterFS","text":"
                  • About op-version

                  If you are using GlusterFS version 6.x or above, you can upgrade it to the following:

                  • Upgrading to 10
                  • Upgrading to 9
                  • Upgrading to 8
                  • Upgrading to 7

                  If you are using GlusterFS version 5.x or above, you can upgrade it to the following:

                  • Upgrading to 8
                  • Upgrading to 7
                  • Upgrading to 6

                  If you are using GlusterFS version 4.x or above, you can upgrade it to the following:

                  • Upgrading to 6
                  • Upgrading to 5

                  If you are using GlusterFS version 3.4.x or above, you can upgrade it to the following:

                  • Upgrading to 3.5
                  • Upgrading to 3.6
                  • Upgrading to 3.7
                  • Upgrading to 3.9
                  • Upgrading to 3.10
                  • Upgrading to 3.11
                  • Upgrading to 3.12
                  • Upgrading to 3.13
                  "},{"location":"Upgrade-Guide/generic-upgrade-procedure/","title":"Generic Upgrade procedure","text":""},{"location":"Upgrade-Guide/generic-upgrade-procedure/#pre-upgrade-notes","title":"Pre-upgrade notes","text":"
                  • Online upgrade is only possible with replicated and distributed replicate volumes
                  • Online upgrade is not supported for dispersed or distributed dispersed volumes
                  • Ensure no configuration changes are done during the upgrade
                  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
                  • Upgrading the servers ahead of the clients is recommended
                  • It is recommended to eventually have clients and servers running the same major version
                  "},{"location":"Upgrade-Guide/generic-upgrade-procedure/#online-upgrade-procedure-for-servers","title":"Online upgrade procedure for servers","text":"

                  This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

                  ALERT: If there are disperse or pure distributed volumes in the storage pool being upgraded, this procedure is NOT recommended; use the Offline upgrade procedure instead.

                  "},{"location":"Upgrade-Guide/generic-upgrade-procedure/#repeat-the-following-steps-on-each-server-in-the-trusted-storage-pool-to-upgrade-the-entire-pool-to-new-version","title":"Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to new-version :","text":"
                  1. Stop all gluster services, either using the command below, or through other means.

                    systemctl stop glusterd\nsystemctl stop glustereventsd\nkillall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

                  3. Install Gluster new-version. The example below shows how to create a repository on Fedora and use it to upgrade:

                    3.1 Create a private repository (assuming the /new-gluster-rpms/ folder has the new rpms):

                    createrepo /new-gluster-rpms/\n

                    3.2 Create the .repo file in /etc/yum.repos.d/:

                    # cat /etc/yum.repos.d/newglusterrepo.repo\n [newglusterrepo]\n name=NewGlusterRepo\n baseurl=\"file:///new-gluster-rpms/\"\n gpgcheck=0\n enabled=1\n

                    3.3 Upgrade glusterfs, for example to upgrade glusterfs-server to version x.y:

                    yum update glusterfs-server-x.y.fc30.x86_64.rpm\n
                  4. Ensure that version reflects new-version in the output of,

                    gluster --version\n
                  5. Start glusterd on the upgraded server

                    systemctl start glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. If the glustereventsd service was previously enabled, start it again using the command below, or through other means:

                    systemctl start glustereventsd\n
                  8. Invoke self-heal on all the gluster volumes by running,

                    for i in `gluster volume list`; do gluster volume heal $i; done\n
                  9. Verify that there is no heal backlog by running the following command for all the volumes:

                    gluster volume heal <volname> info\n

                  NOTE: Before proceeding to upgrade the next server in the pool, it is recommended to check the heal backlog. If there is a heal backlog, wait until the backlog is empty, or at least until it does not contain any entries requiring a sync to the just-upgraded server.

                  10. Restart any gfapi-based application stopped previously in step (2)
                  "},{"location":"Upgrade-Guide/generic-upgrade-procedure/#offline-upgrade-procedure","title":"Offline upgrade procedure","text":"

                  This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

                  "},{"location":"Upgrade-Guide/generic-upgrade-procedure/#steps-to-perform-an-offline-upgrade","title":"Steps to perform an offline upgrade:","text":"
                  1. On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

                    systemctl stop glusterd\nsystemctl stop glustereventsd\nkillall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

                  3. Install Gluster new-version, on all servers

                  4. Ensure that version reflects new-version in the output of the following command on all servers,

                    gluster --version\n
                  5. Start glusterd on all the upgraded servers

                    systemctl start glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. If the glustereventsd service was previously enabled, start it again using the command below, or through other means:

                    systemctl start glustereventsd\n
                  8. Restart any gfapi-based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/generic-upgrade-procedure/#post-upgrade-steps","title":"Post upgrade steps","text":"

                  Perform the following steps post upgrading the entire trusted storage pool,

                  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
                  • Proceed to upgrade the clients to new-version version as well
                  • Post upgrading the clients, for replicate volumes, it is recommended to enable the fips-mode-rchecksum option (gluster volume set <volname> fips-mode-rchecksum on) to turn off the use of MD5 checksums during healing. This enables running Gluster on FIPS-compliant systems.
                  "},{"location":"Upgrade-Guide/generic-upgrade-procedure/#if-upgrading-from-a-version-lesser-than-gluster-70","title":"If upgrading from a version lesser than Gluster 7.0","text":"

                  NOTE: If you have ever enabled quota on your volumes, then after the upgrade is done, you will have to restart all the nodes in the cluster one by one to fix the checksum values in the quota.cksum file under the /var/lib/glusterd/vols/<volname>/ directory. The peers may go into the Peer rejected state while doing so, but once all the nodes are rebooted, everything will be back to normal.

                  "},{"location":"Upgrade-Guide/generic-upgrade-procedure/#upgrade-procedure-for-clients","title":"Upgrade procedure for clients","text":"

                  Following are the steps to upgrade clients to the new-version version,

                  1. Unmount all glusterfs mount points on the client
                  2. Stop all applications that access the volumes via gfapi (qemu, etc.)
                  3. Install Gluster new-version
                  4. Mount all gluster shares
                  5. Start any applications that were stopped previously in step (2)
                  "},{"location":"Upgrade-Guide/op-version/","title":"Op-version","text":""},{"location":"Upgrade-Guide/op-version/#op-version","title":"op-version","text":"

                  op-version is the operating version of the Gluster which is running.

                  op-version was introduced to ensure that clusters running different Gluster versions do not run into problems and that backward-compatibility issues can be handled.

                  After a Gluster upgrade, it is advisable to update the op-version.

                  "},{"location":"Upgrade-Guide/op-version/#updating-op-version","title":"Updating op-version","text":"

                  Current op-version can be queried as below:

                  For 3.10 onwards:

                  gluster volume get all cluster.op-version\n

                  For release < 3.10:

                  # gluster volume get <VOLNAME> cluster.op-version\n

                  To get the maximum possible op-version a cluster can support, the following query can be used (this is available 3.10 release onwards):

                  gluster volume get all cluster.max-op-version\n

                  For example, if some nodes in a cluster have been upgraded to X and some to X+, then the maximum op-version supported by the cluster is X, and the cluster.op-version can be bumped up to X to support new features.

                  op-version can be updated as below. For example, after upgrading to glusterfs-4.0.0, set op-version as:

                  gluster volume set all cluster.op-version 40000\n

                  Note: This is not mandatory, but it is advisable to update the op-version if you want to make use of the latest features in the upgraded Gluster.

                  "},{"location":"Upgrade-Guide/op-version/#client-op-version","title":"Client op-version","text":"

                  When trying to set a volume option, it might happen that one or more of the connected clients cannot support the feature being set and might need to be upgraded to the op-version the cluster is currently running on.

                  To check op-version information for the connected clients and find the offending client, the following query can be used for 3.10 release onwards:

                  # gluster volume status <all|VOLNAME> clients\n

                  The respective clients can then be upgraded to the required version.

                  This information could also be used to make an informed decision while bumping up the op-version of a cluster, so that connected clients can support all the new features provided by the upgraded cluster as well.

                  "},{"location":"Upgrade-Guide/upgrade-to-10/","title":"Upgrade procedure to Gluster 10, from Gluster 9.x, 8.x and 7.x","text":"

                  We recommend reading the release notes for 10.0 to be aware of the features and fixes provided with the release.

                  NOTE: Before following the generic upgrade procedure, check out the \"Major Issues\" section given below.

                  Refer to the generic upgrade procedure guide and follow the documented instructions.

                  "},{"location":"Upgrade-Guide/upgrade-to-10/#major-issues","title":"Major issues","text":""},{"location":"Upgrade-Guide/upgrade-to-10/#the-following-options-are-removed-from-the-code-base-and-require-to-be-unset","title":"The following options are removed from the code base and require to be unset","text":"

                  before an upgrade from releases older than release 4.1.0,

                  - features.lock-heal\n- features.grace-timeout\n

                  To check if these options are set use,

                  gluster volume info\n

                  and ensure that the above options are not part of the Options Reconfigured: section in the output of all volumes in the cluster.

                  If these are set, unset them using the following command:

                  # gluster volume reset <volname> <option>\n
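
                  A minimal sketch of the check-and-reset loop described above, assuming volume names without spaces; it resets an option only when it actually appears in the volume info output:

                  for vol in `gluster volume list`; do\n  for opt in features.lock-heal features.grace-timeout; do\n    # reset the option only if it shows up under Options Reconfigured\n    gluster volume info $vol | grep -q ^$opt: && gluster volume reset $vol $opt\n  done\ndone\n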
                  "},{"location":"Upgrade-Guide/upgrade-to-10/#make-sure-you-are-not-using-any-of-the-following-depricated-features","title":"Make sure you are not using any of the following depricated features :","text":"
                  - Block device (bd) xlator\n- Decompounder feature\n- Crypt xlator\n- Symlink-cache xlator\n- Stripe feature\n- Tiering support (tier xlator and changetimerecorder)\n- Glupy\n

                  NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults prior to upgrading the cluster.

                  "},{"location":"Upgrade-Guide/upgrade-to-10/#deprecated-translators-and-upgrade-procedure-for-volumes-using-these-features","title":"Deprecated translators and upgrade procedure for volumes using these features","text":"

                  If you are upgrading from a release prior to release-6, be aware of deprecated xlators and functionality.

                  "},{"location":"Upgrade-Guide/upgrade-to-11/","title":"Upgrade procedure to Gluster 11, from Gluster 10.x, 9.x and 8.x","text":"

                  We recommend reading the release notes for 11.0 to be aware of the features and fixes provided with the release.

                  NOTE: Before following the generic upgrade procedure, check out the \"Major Issues\" section given below.

                  Refer to the generic upgrade procedure guide and follow the documented instructions.

                  "},{"location":"Upgrade-Guide/upgrade-to-11/#major-issues","title":"Major issues","text":""},{"location":"Upgrade-Guide/upgrade-to-11/#the-following-options-are-removed-from-the-code-base-and-require-to-be-unset","title":"The following options are removed from the code base and require to be unset","text":"

                  before an upgrade from releases older than release 4.1.0,

                  - features.lock-heal\n- features.grace-timeout\n

                  To check if these options are set, use:

                  gluster volume info\n

                  and ensure that the above options are not part of the Options Reconfigured: section in the output of all volumes in the cluster.

                  If these are set, unset them using the following command:

                  # gluster volume reset <volname> <option>\n
                  "},{"location":"Upgrade-Guide/upgrade-to-11/#make-sure-you-are-not-using-any-of-the-following-depricated-features","title":"Make sure you are not using any of the following depricated features :","text":"
                  - Block device (bd) xlator\n- Decompounder feature\n- Crypt xlator\n- Symlink-cache xlator\n- Stripe feature\n- Tiering support (tier xlator and changetimerecorder)\n- Glupy\n

                  NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults prior to upgrading the cluster.

                  Online Upgrade: Users will observe \"Peer Rejected\" issues while upgrading if NFS Ganesha is not enabled, because the nfs options were made optional in this release, causing a checksum mismatch. Stopping and starting the rejected server (its glusterd service) after the upgrade should fix the issue.
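
                  A minimal sketch of that restart on a systemd-based distribution; it restarts the management daemon on the rejected server and then verifies the peer state:

                  systemctl restart glusterd\n# the peer should move back to the Connected state\ngluster peer status\n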

                  "},{"location":"Upgrade-Guide/upgrade-to-11/#deprecated-translators-and-upgrade-procedure-for-volumes-using-these-features","title":"Deprecated translators and upgrade procedure for volumes using these features","text":"

                  If you are upgrading from a release prior to release-6, be aware of deprecated xlators and functionality.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.10/","title":"Upgrade to 3.10","text":""},{"location":"Upgrade-Guide/upgrade-to-3.10/#upgrade-procedure-to-gluster-3100-from-gluster-39x-38x-and-37x","title":"Upgrade procedure to Gluster 3.10.0, from Gluster 3.9.x, 3.8.x and 3.7.x","text":""},{"location":"Upgrade-Guide/upgrade-to-3.10/#pre-upgrade-notes","title":"Pre-upgrade notes","text":"
                  • Online upgrade is only possible with replicated and distributed replicate volumes
                  • Online upgrade is not supported for dispersed or distributed dispersed volumes
                  • Ensure no configuration changes are done during the upgrade
                  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
                  • Upgrading the servers ahead of the clients is recommended
                  • It is recommended that clients and servers eventually run the same major version
                  "},{"location":"Upgrade-Guide/upgrade-to-3.10/#online-upgrade-procedure-for-servers","title":"Online upgrade procedure for servers","text":"

                  This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

                  ALERT: If any of your volumes in the trusted storage pool that is being upgraded uses disperse or is a pure distributed volume, this procedure is NOT recommended; use the Offline upgrade procedure instead.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.10/#repeat-the-following-steps-on-each-server-in-the-trusted-storage-pool-to-upgrade-the-entire-pool-to-310-version","title":"Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 3.10 version:","text":"
                  1. Stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

                  3. Install Gluster 3.10

                  4. Ensure that version reflects 3.10.0 in the output of,

                    gluster --version\n
                  5. Start glusterd on the upgraded server

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. Self-heal all gluster volumes by running

                    for i in `gluster volume list`; do gluster volume heal $i; done\n
                  8. Ensure that there is no heal backlog by running the below command for all volumes

                    gluster volume heal <volname> info\n

                    NOTE: If there is a heal backlog, wait until the backlog is empty, or until it has no entries that need a sync to the just-upgraded server, before proceeding to upgrade the next server in the pool (a helper loop is sketched after this list)

                  9. Restart any gfapi based application stopped previously in step (2)
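
                  A hedged helper for step 8 above: it waits until every volume reports an empty heal backlog before you move on to the next server (the sketch assumes volume names without spaces and parses the Number of entries lines of the heal info output):

                  for vol in `gluster volume list`; do\n  # loop until no brick of this volume reports a non-zero backlog\n  while gluster volume heal $vol info | grep 'Number of entries:' | grep -qv 'Number of entries: 0'; do\n    sleep 10\n  done\ndone\n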

                  "},{"location":"Upgrade-Guide/upgrade-to-3.10/#offline-upgrade-procedure","title":"Offline upgrade procedure","text":"

                  This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.10/#steps-to-perform-an-offline-upgrade","title":"Steps to perform an offline upgrade:","text":"
                  1. On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

                  3. Install Gluster 3.10, on all servers

                  4. Ensure that version reflects 3.10.0 in the output of the following command on all servers,

                    gluster --version\n
                  5. Start glusterd on all the upgraded servers

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-3.10/#post-upgrade-steps","title":"Post upgrade steps","text":"

                  Perform the following steps post upgrading the entire trusted storage pool,

                  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
                  • Proceed to upgrade the clients to 3.10 version as well
                  "},{"location":"Upgrade-Guide/upgrade-to-3.10/#upgrade-procedure-for-clients","title":"Upgrade procedure for clients","text":"

                  Following are the steps to upgrade clients to the 3.10.0 version,

                  1. Unmount all glusterfs mount points on the client
                  2. Stop all applications that access the volumes via gfapi (qemu, etc.)
                  3. Install Gluster 3.10
                  4. Mount all gluster shares
                  5. Start any applications that were stopped previously in step (2)
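
                  A hedged helper for step 1 above: it unmounts every FUSE-mounted Gluster volume on the client (the sketch assumes mount points without spaces in their paths):

                  # list the FUSE glusterfs mounts and unmount each one\nfor mnt in `mount -t fuse.glusterfs | awk '{print $3}'`; do\n  umount $mnt\ndone\n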
                  "},{"location":"Upgrade-Guide/upgrade-to-3.11/","title":"Upgrade to 3.11","text":""},{"location":"Upgrade-Guide/upgrade-to-3.11/#upgrade-procedure-to-gluster-311-from-gluster-310x-and-38x","title":"Upgrade procedure to Gluster 3.11, from Gluster 3.10.x, and 3.8.x","text":"

                  NOTE: Upgrade procedure remains the same as with the 3.10 release

                  "},{"location":"Upgrade-Guide/upgrade-to-3.11/#pre-upgrade-notes","title":"Pre-upgrade notes","text":"
                  • Online upgrade is only possible with replicated and distributed replicate volumes
                  • Online upgrade is not supported for dispersed or distributed dispersed volumes
                  • Ensure no configuration changes are done during the upgrade
                  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
                  • Upgrading the servers ahead of the clients is recommended
                  • It is recommended that clients and servers eventually run the same major version
                  "},{"location":"Upgrade-Guide/upgrade-to-3.11/#online-upgrade-procedure-for-servers","title":"Online upgrade procedure for servers","text":"

                  This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

                  ALERT: If any of your volumes in the trusted storage pool that is being upgraded uses disperse or is a pure distributed volume, this procedure is NOT recommended; use the Offline upgrade procedure instead.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.11/#repeat-the-following-steps-on-each-server-in-the-trusted-storage-pool-to-upgrade-the-entire-pool-to-311-version","title":"Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 3.11 version:","text":"
                  1. Stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

                  3. Install Gluster 3.11

                  4. Ensure that version reflects 3.11.x in the output of,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on the upgraded server

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. Self-heal all gluster volumes by running

                    for i in `gluster volume list`; do gluster volume heal $i; done\n
                  8. Ensure that there is no heal backlog by running the below command for all volumes

                    gluster volume heal <volname> info\n

                    NOTE: If there is a heal backlog, wait until the backlog is empty, or until it has no entries that need a sync to the just-upgraded server, before proceeding to upgrade the next server in the pool

                  9. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-3.11/#offline-upgrade-procedure","title":"Offline upgrade procedure","text":"

                  This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.11/#steps-to-perform-an-offline-upgrade","title":"Steps to perform an offline upgrade:","text":"
                  1. On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

                  3. Install Gluster 3.11, on all servers

                  4. Ensure that version reflects 3.11.x in the output of the following command on all servers,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on all the upgraded servers

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-3.11/#post-upgrade-steps","title":"Post upgrade steps","text":"

                  Perform the following steps post upgrading the entire trusted storage pool,

                  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
                  • Proceed to upgrade the clients to 3.11 version as well
                  "},{"location":"Upgrade-Guide/upgrade-to-3.11/#upgrade-procedure-for-clients","title":"Upgrade procedure for clients","text":"

                  Following are the steps to upgrade clients to the 3.11.x version,

                  NOTE: x is the minor release number for the release

                  1. Unmount all glusterfs mount points on the client
                  2. Stop all applications that access the volumes via gfapi (qemu, etc.)
                  3. Install Gluster 3.11
                  4. Mount all gluster shares
                  5. Start any applications that were stopped previously in step (2)
                  "},{"location":"Upgrade-Guide/upgrade-to-3.12/","title":"Upgrade to 3.12","text":""},{"location":"Upgrade-Guide/upgrade-to-3.12/#upgrade-procedure-to-gluster-312-from-gluster-311x-310x-and-38x","title":"Upgrade procedure to Gluster 3.12, from Gluster 3.11.x, 3.10.x, and 3.8.x","text":"

                  NOTE: Upgrade procedure remains the same as with 3.11 and 3.10 releases

                  "},{"location":"Upgrade-Guide/upgrade-to-3.12/#pre-upgrade-notes","title":"Pre-upgrade notes","text":"
                  • Online upgrade is only possible with replicated and distributed replicate volumes
                  • Online upgrade is not supported for dispersed or distributed dispersed volumes
                  • Ensure no configuration changes are done during the upgrade
                  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
                  • Upgrading the servers ahead of the clients is recommended
                  • It is recommended that clients and servers eventually run the same major version
                  "},{"location":"Upgrade-Guide/upgrade-to-3.12/#online-upgrade-procedure-for-servers","title":"Online upgrade procedure for servers","text":"

                  This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

                  ALERT: If there are disperse or pure distributed volumes in the storage pool being upgraded, this procedure is NOT recommended; use the Offline upgrade procedure instead.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.12/#repeat-the-following-steps-on-each-server-in-the-trusted-storage-pool-to-upgrade-the-entire-pool-to-312-version","title":"Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 3.12 version:","text":"
                  1. Stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\nsystemctl stop glustereventsd\n
                  2. Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

                  3. Install Gluster 3.12

                  4. Ensure that version reflects 3.12.x in the output of,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on the upgraded server

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. If the glustereventsd service was previously enabled, it needs to be started again, using the command below or through other means,

                    systemctl start glustereventsd\n
                  8. Invoke self-heal on all the gluster volumes by running,

                    for i in `gluster volume list`; do gluster volume heal $i; done\n
                  9. Verify that there is no heal backlog by running the command for all the volumes,

                    gluster volume heal <volname> info\n

                    NOTE: Before proceeding to upgrade the next server in the pool it is recommended to check the heal backlog. If there is a heal backlog, it is recommended to wait until the backlog is empty, or, the backlog does not contain any entries requiring a sync to the just upgraded server.

                  10. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-3.12/#offline-upgrade-procedure","title":"Offline upgrade procedure","text":"

                  This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.12/#steps-to-perform-an-offline-upgrade","title":"Steps to perform an offline upgrade:","text":"
                  1. On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd glustereventsd\nsystemctl stop glustereventsd\n
                  2. Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

                  3. Install Gluster 3.12, on all servers

                  4. Ensure that version reflects 3.12.x in the output of the following command on all servers,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on all the upgraded servers

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. If the glustereventsd service was previously enabled, it needs to be started again, using the command below or through other means,

                    systemctl start glustereventsd\n
                  8. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-3.12/#post-upgrade-steps","title":"Post upgrade steps","text":"

                  Perform the following steps post upgrading the entire trusted storage pool,

                  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
                  • Proceed to upgrade the clients to 3.12 version as well
                  "},{"location":"Upgrade-Guide/upgrade-to-3.12/#upgrade-procedure-for-clients","title":"Upgrade procedure for clients","text":"

                  Following are the steps to upgrade clients to the 3.12.x version,

                  NOTE: x is the minor release number for the release

                  1. Unmount all glusterfs mount points on the client
                  2. Stop all applications that access the volumes via gfapi (qemu, etc.)
                  3. Install Gluster 3.12
                  4. Mount all gluster shares
                  5. Start any applications that were stopped previously in step (2)
                  "},{"location":"Upgrade-Guide/upgrade-to-3.13/","title":"Upgrade to 3.13","text":""},{"location":"Upgrade-Guide/upgrade-to-3.13/#upgrade-procedure-to-gluster-313-from-gluster-312x-and-310x","title":"Upgrade procedure to Gluster 3.13, from Gluster 3.12.x, and 3.10.x","text":"

                  NOTE: Upgrade procedure remains the same as with 3.12 and 3.10 releases

                  "},{"location":"Upgrade-Guide/upgrade-to-3.13/#pre-upgrade-notes","title":"Pre-upgrade notes","text":"
                  • Online upgrade is only possible with replicated and distributed replicate volumes
                  • Online upgrade is not supported for dispersed or distributed dispersed volumes
                  • Ensure no configuration changes are done during the upgrade
                  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
                  • Upgrading the servers ahead of the clients is recommended
                  • It is recommended that clients and servers eventually run the same major version
                  "},{"location":"Upgrade-Guide/upgrade-to-3.13/#online-upgrade-procedure-for-servers","title":"Online upgrade procedure for servers","text":"

                  This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not part of the same server in the trusted storage pool.

                  ALERT: If any of your volumes in the trusted storage pool that is being upgraded uses disperse or is a pure distributed volume, this procedure is NOT recommended; use the Offline upgrade procedure instead.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.13/#repeat-the-following-steps-on-each-server-in-the-trusted-storage-pool-to-upgrade-the-entire-pool-to-313-version","title":"Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 3.13 version:","text":"
                  1. Stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

                  3. Install Gluster 3.13

                  4. Ensure that version reflects 3.13.x in the output of,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on the upgraded server

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. Self-heal all gluster volumes by running

                    for i in `gluster volume list`; do gluster volume heal $i; done\n
                  8. Ensure that there is no heal backlog by running the below command for all volumes

                    gluster volume heal <volname> info\n

                    NOTE: If there is a heal backlog, wait until the backlog is empty, or until it has no entries that need a sync to the just-upgraded server, before proceeding to upgrade the next server in the pool

                  9. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-3.13/#offline-upgrade-procedure","title":"Offline upgrade procedure","text":"

                  This procedure involves cluster downtime and during the upgrade window, clients are not allowed access to the volumes.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.13/#steps-to-perform-an-offline-upgrade","title":"Steps to perform an offline upgrade:","text":"
                  1. On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

                  3. Install Gluster 3.13, on all servers

                  4. Ensure that version reflects 3.13.x in the output of the following command on all servers,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on all the upgraded servers

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-3.13/#post-upgrade-steps","title":"Post upgrade steps","text":"

                  Perform the following steps post upgrading the entire trusted storage pool,

                  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
                  • Proceed to upgrade the clients to 3.13 version as well
                  "},{"location":"Upgrade-Guide/upgrade-to-3.13/#upgrade-procedure-for-clients","title":"Upgrade procedure for clients","text":"

                  Following are the steps to upgrade clients to the 3.13.x version,

                  NOTE: x is the minor release number for the release

                  1. Unmount all glusterfs mount points on the client
                  2. Stop all applications that access the volumes via gfapi (qemu, etc.)
                  3. Install Gluster 3.13
                  4. Mount all gluster shares
                  5. Start any applications that were stopped previously in step (2)
                  "},{"location":"Upgrade-Guide/upgrade-to-3.5/","title":"Upgrade to 3.5","text":""},{"location":"Upgrade-Guide/upgrade-to-3.5/#glusterfs-upgrade-from-34x-to-35","title":"Glusterfs upgrade from 3.4.x to 3.5","text":"

                  Now that GlusterFS 3.5.0 is out, here are some mechanisms to upgrade from earlier installed versions of GlusterFS.

                  Upgrade from GlusterFS 3.4.x:

                  GlusterFS 3.5.0 is compatible with 3.4.x (yes, you read it right!). You can upgrade your deployment by following one of the two procedures below.

                  a) Scheduling a downtime (Recommended)

                  For this approach, schedule a downtime and prevent all your clients from accessing the servers.

                  If you have quota configured, you need to perform steps 1 and 6; otherwise you can skip them.

                  If you have a geo-replication session running, stop the session using the geo-rep stop command (please refer to step 1 of the geo-rep upgrade steps provided below).

                  1. Execute \"pre-upgrade-script-for-quota.sh\" mentioned under \"Upgrade Steps For Quota\" section.
                  2. Stop all glusterd, glusterfsd and glusterfs processes on your server.
                  3. Install GlusterFS 3.5.0
                  4. Start glusterd.
                  5. Ensure that all started volumes have processes online in \u201cgluster volume status\u201d.
                  6. Execute \"Post-Upgrade Script\" mentioned under \"Upgrade Steps For Quota\" section.

                  You would need to repeat these steps on all servers that form your trusted storage pool.

                  To upgrade geo-replication session, please refer to geo-rep upgrade steps provided below (from step 2)

                  After upgrading the servers, it is recommended to upgrade all client installations to 3.5.0.

                  b) Rolling upgrades with no downtime

                  If you have replicated or distributed replicated volumes with bricks placed in the right fashion for redundancy, have no data to be self-healed and feel adventurous, you can perform a rolling upgrade through the following procedure:

                  NOTE: Rolling upgrade of geo-replication session from glusterfs version \\< 3.5 to 3.5.x is not supported.

                  If you have quota configured, you need to perform steps 1 and 7; otherwise you can skip them.

                  1. Execute \"pre-upgrade-script-for-quota.sh\" mentioned under \"Upgrade Steps For Quota\" section.
                  2. Stop all glusterd, glusterfs and glusterfsd processes on your server.
                  3. Install GlusterFS 3.5.0.
                  4. Start glusterd.
                  5. Run \u201cgluster volume heal <volname> info\u201d on all volumes and ensure that there is nothing left to be self-healed on every volume. If you have pending data for self-heal, run \u201cgluster volume heal <volname>\u201d and wait for self-heal to complete.
                  6. Ensure that all started volumes have processes online in \u201cgluster volume status\u201d.
                  7. Execute \"Post-Upgrade Script\" mentioned under \"Upgrade Steps For Quota\" section.

                  Repeat the above steps on all servers that are part of your trusted storage pool.

                  Again after upgrading the servers, it is recommended to upgrade all client installations to 3.5.0.

                  Do report your findings on 3.5.0 in gluster-users, #gluster on Freenode and bugzilla.

                  Please note that this may not work for all installations & upgrades. If you notice anything amiss and would like to see it covered here, please point it out.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.5/#upgrade-steps-for-quota","title":"Upgrade Steps For Quota","text":"

                  The upgrade process for quota involves executing two upgrade scripts:

                  1. pre-upgrade-script-for-quota.sh, and
                  2. post-upgrade-script-for-quota.sh

                  Pre-Upgrade Script:

                  What it does:

                  The pre-upgrade script (pre-upgrade-script-for-quota.sh) iterates over the list of volumes that have quota enabled and captures the configured quota limits for each such volume in a file under /var/tmp/glusterfs/quota-config-backup/vol_\\<VOLNAME> by executing 'quota list' command on each one of them.

                  Pre-requisites for running Pre-Upgrade Script:

                  1. Make sure glusterd and the brick processes are running on all nodes in the cluster.
                  2. The pre-upgrade script must be run prior to the upgrade.
                  3. The pre-upgrade script must be run on only one of the nodes in the cluster.

                  Location:

                  pre-upgrade-script-for-quota.sh must be retrieved from the source tree under the 'extras' directory.

                  Invocation:

                  Invoke the script by executing `./pre-upgrade-script-for-quota.sh` from the shell on any one of the nodes in the cluster.

                  • Example:

                    [root@server1 extras]#./pre-upgrade-script-for-quota.sh

                  Post-Upgrade Script:

                  What it does:

                  The post-upgrade script (post-upgrade-script-for-quota.sh) picks the volumes that have quota enabled.

                  Because the cluster must be operating at op-version 3 for quota to work, the 'default-soft-limit' for each of these volumes is set to 80% (which is its default value) via `volume set` operation as an explicit trigger to bump up the op-version of the cluster and also to trigger a re-write of volfiles which knocks quota off client volume file.

                  Once this is done, these volumes are started forcefully using `volume start force` to launch the Quota Daemon on all the nodes.

                  Thereafter, for each of these volumes, the paths and the limits configured on them are retrieved from the backed up file /var/tmp/glusterfs/quota-config-backup/vol_\\<VOLNAME> and limits are set on them via the `quota limit-usage` interface.

                  Note:

                  In the new version of quota, the command `quota limit-usage` will fail if the directory on which quota limit is to be set for a given volume does not exist. Therefore, it is advised that you create these directories first before running post-upgrade-script-for-quota.sh if you want limits to be set on these directories.
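
                  A hedged sketch of pre-creating those directories from the backup file. The mount point /mnt/vol1 is hypothetical, and it is assumed that the first column of the backup file (the saved 'quota list' output) is the directory path and that paths contain no spaces:

                  MOUNT=/mnt/vol1   # hypothetical FUSE mount of the volume\nBACKUP=/var/tmp/glusterfs/quota-config-backup/vol_vol1\n# take the first column of every line that looks like a path and create it under the mount\nfor dir in `grep ^/ $BACKUP | awk '{print $1}'`; do\n  mkdir -p $MOUNT$dir\ndone\n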

                  Pre-requisites for running Post-Upgrade Script:

                  1. The post-upgrade script must be executed after all the nodes in the cluster have upgraded.
                  2. Also, all the clients accessing the given volume must also be upgraded before the script is run.
                  3. Make sure glusterd and the brick processes are running on all nodes in the cluster post upgrade.
                  4. The script must be run from the same node where the pre-upgrade script was run.

                  Location:

                  post-upgrade-script-for-quota.sh can be found under the 'extras' directory of the source tree for glusterfs.

                  Invocation:

                  post-upgrade-script-for-quota.sh takes one command-line argument, which must be either the name of a volume on which quota is enabled, or 'all'.

                  In the first case, invoke post-upgrade-script-for-quota.sh from the shell for each volume with quota enabled, with the name of the volume passed as an argument in the command-line:

                  • Example:

                  For a volume \"vol1\" on which quota is enabled, invoke the script in the following way:

                      [root@server1 extras]#./post-upgrade-script-for-quota.sh vol1\n

                  In the second case, the post-upgrade script picks on its own, the volumes on which quota is enabled, and executes the post-upgrade procedure on each one of them. In this case, invoke post-upgrade-script-for-quota.sh from the shell with 'all' passed as an argument in the command-line:

                  • Example:

                    [root@server1 extras]#./post-upgrade-script-for-quota.sh all

                  Note:

                  In the second case, post-upgrade-script-for-quota.sh exits prematurely upon failure to upgrade any given volume. In that case, you may run post-upgrade-script-for-quota.sh individually (using the volume name as command line argument) on this volume and also on all quota-enabled volumes appearing after it in the output of `gluster volume list`.
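
                  A hedged sketch of that recovery. The failed volume name is hypothetical, and checking for quota via features.quota: on in the volume info output is an assumption; adjust it to match your output:

                  FAILED_VOL=vol3   # hypothetical: the volume on which the 'all' run stopped\n# re-run the script for the failed volume and every quota-enabled volume listed after it\nfor vol in `gluster volume list | awk -v v=$FAILED_VOL 'found || $0==v {found=1; print}'`; do\n  gluster volume info $vol | grep -q 'features.quota: on' && ./post-upgrade-script-for-quota.sh $vol\ndone\n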

                  The backed up files under /var/tmp/glusterfs/quota-config-backup/ are retained after the post-upgrade procedure for reference.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.5/#upgrade-steps-for-geo-replication","title":"Upgrade steps for geo replication:","text":"

                  Here are the steps to upgrade your existing geo-replication setup to the new distributed geo-replication in glusterfs-3.5. The new version leverages all the nodes in your master volume and provides better performance.

                  Note:

                  Since the new version of geo-rep is very different from the older one, this has to be done offline.

                  New version supports only syncing between two gluster volumes via ssh+gluster.

                  This document deals with upgrading geo-rep; upgrading the volumes themselves is not covered in detail here.

                  Below are the steps to upgrade:

                  \u200b1. Stop the geo-replication session in older version ( \\< 3.5) using the below command

                      #gluster volume geo-replication `<master_vol>` `<slave_host>`::`<slave_vol>` stop\n

                  \u200b2. Now since the new geo-replication requires gfids of master and slave volume to be same, generate a file containing the gfids of all the files in master

                      cd /usr/share/glusterfs/scripts/ ;\n    bash generate-gfid-file.sh localhost:`<master_vol>` $PWD/get-gfid.sh    /tmp/master_gfid_file.txt ;\n    scp /tmp/master_gfid_file.txt root@`<slave_host>`:/tmp\n

                  \u200b3. Now go to the slave host and apply the gfid to the slave volume.

                      cd /usr/share/glusterfs/scripts/\n    bash slave-upgrade.sh localhost:`<slave_vol>` /tmp/master_gfid_file.txt    $PWD/gsync-sync-gfid\n

                  This will ask you for the password of all the nodes in the slave cluster. Please provide them, if asked.

                  \u200b4. Also note that this will restart your slave gluster volume (stop and start)

                  \u200b5. Now create and start the geo-rep session between master and slave. For instructions on creating a new geo-rep session, please refer to the distributed-geo-rep admin guide.

                      gluster volume geo-replication `<master_volume>` `<slave_host>`::`<slave_volume>` create push-pem force\n    gluster volume geo-replication `<master_volume>` `<slave_host>`::`<slave_volume>` start\n

                  \u200b6. Now your session is upgraded to use distributed-geo-rep

                  "},{"location":"Upgrade-Guide/upgrade-to-3.6/","title":"GlusterFS upgrade from 3.5.x to 3.6.x","text":"

                  Now that GlusterFS 3.6.0 is out, here is the process to upgrade from earlier installed versions of GlusterFS.

                  If you are using GlusterFS replication ( \\< 3.6) in your setup, please note that the new afrv2 implementation is only compatible with 3.6 GlusterFS clients. If you are not updating your clients to GlusterFS version 3.6, you need to disable the client self-heal process. You can do this with the commands below.

                  # gluster v set testvol cluster.entry-self-heal off\nvolume set: success\n\n# gluster v set testvol cluster.data-self-heal off\nvolume set: success\n\n# gluster v set testvol cluster.metadata-self-heal off\nvolume set: success\n
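
                  When more than one volume is served, the same three options can be disabled on every volume with a short loop; a minimal sketch, assuming volume names without spaces:

                  for vol in `gluster volume list`; do\n  for opt in cluster.entry-self-heal cluster.data-self-heal cluster.metadata-self-heal; do\n    gluster volume set $vol $opt off\n  done\ndone\n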
                  "},{"location":"Upgrade-Guide/upgrade-to-3.6/#glusterfs-upgrade-from-35x-to-36x_1","title":"GlusterFS upgrade from 3.5.x to 3.6.x","text":"

                  a) Scheduling a downtime (Recommended)

                  For this approach, schedule a downtime and prevent all your clients from accessing the servers (umount your volumes, stop gluster volumes, etc.).

                  1. Stop all glusterd, glusterfsd and glusterfs processes on your server.
                  2. Install GlusterFS 3.6.0
                  3. Start glusterd.
                  4. Ensure that all started volumes have processes online in \u201cgluster volume status\u201d.

                  You would need to repeat these steps on all servers that form your trusted storage pool.

                  After upgrading the servers, it is recommended to upgrade all client installations to 3.6.0

                  "},{"location":"Upgrade-Guide/upgrade-to-3.6/#glusterfs-upgrade-from-34x-to-36x","title":"GlusterFS upgrade from 3.4.x to 3.6.X","text":"

                  Upgrade from GlusterFS 3.4.x:

                  GlusterFS 3.6.0 is compatible with 3.4.x (yes, you read it right!). You can upgrade your deployment by following one of the two procedures below.

                  a) Scheduling a downtime (Recommended)

                  For this approach, schedule a downtime and prevent all your clients from accessing the servers (umount your volumes, stop gluster volumes, etc.).

                  If you have quota configured, you need to perform steps 1 and 6; otherwise you can skip them.

                  If you have a geo-replication session running, stop the session using the geo-rep stop command (please refer to step 1 of the geo-rep upgrade steps provided below).

                  1. Execute \"pre-upgrade-script-for-quota.sh\" mentioned under \"Upgrade Steps For Quota\" section.
                  2. Stop all glusterd, glusterfsd and glusterfs processes on your server.
                  3. Install GlusterFS 3.6.0
                  4. Start glusterd.
                  5. Ensure that all started volumes have processes online in \u201cgluster volume status\u201d.
                  6. Execute \"Post-Upgrade Script\" mentioned under \"Upgrade Steps For Quota\" section.

                  You would need to repeat these steps on all servers that form your trusted storage pool.

                  To upgrade geo-replication session, please refer to geo-rep upgrade steps provided below (from step 2)

                  After upgrading the servers, it is recommended to upgrade all client installations to 3.6.0.

                  Do report your findings on 3.6.0 in gluster-users, #gluster on Freenode and bugzilla.

                  Please note that this may not work for all installations & upgrades. If you notice anything amiss and would like to see it covered here, please point it out.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.6/#upgrade-steps-for-quota","title":"Upgrade Steps For Quota","text":"

                  The upgrade process for quota involves executing two upgrade scripts:

                  1. pre-upgrade-script-for-quota.sh, and
                  2. post-upgrade-script-for-quota.sh

                  Pre-Upgrade Script:

                  What it does:

                  The pre-upgrade script (pre-upgrade-script-for-quota.sh) iterates over the list of volumes that have quota enabled and captures the configured quota limits for each such volume in a file under /var/tmp/glusterfs/quota-config-backup/vol_\\<VOLNAME> by executing 'quota list' command on each one of them.

                  Pre-requisites for running Pre-Upgrade Script:

                  1. Make sure glusterd and the brick processes are running on all nodes in the cluster.
                  2. The pre-upgrade script must be run prior to the upgrade.
                  3. The pre-upgrade script must be run on only one of the nodes in the cluster.

                  Location:

                  pre-upgrade-script-for-quota.sh must be retrieved from the source tree under the 'extras' directory.

                  Invocation:

                  Invoke the script by executing `./pre-upgrade-script-for-quota.sh` from the shell on any one of the nodes in the cluster.

                  Example:

                  [root@server1 extras]#./pre-upgrade-script-for-quota.sh\n

                  Post-Upgrade Script:

                  What it does:

                  The post-upgrade script (post-upgrade-script-for-quota.sh) picks the volumes that have quota enabled.

                  Because the cluster must be operating at op-version 3 for quota to work, the 'default-soft-limit' for each of these volumes is set to 80% (which is its default value) via `volume set` operation as an explicit trigger to bump up the op-version of the cluster and also to trigger a re-write of volfiles which knocks quota off client volume file.

                  Once this is done, these volumes are started forcefully using `volume start force` to launch the Quota Daemon on all the nodes.

                  Thereafter, for each of these volumes, the paths and the limits configured on them are retrieved from the backed up file /var/tmp/glusterfs/quota-config-backup/vol_\\<VOLNAME> and limits are set on them via the `quota limit-usage` interface.

                  Note:

                  In the new version of quota, the command `quota limit-usage` will fail if the directory on which quota limit is to be set for a given volume does not exist. Therefore, it is advised that you create these directories first before running post-upgrade-script-for-quota.sh if you want limits to be set on these directories.

                  Pre-requisites for running Post-Upgrade Script:

                  1. The post-upgrade script must be executed after all the nodes in the cluster have upgraded.
                  2. Also, all the clients accessing the given volume must also be upgraded before the script is run.
                  3. Make sure glusterd and the brick processes are running on all nodes in the cluster post upgrade.
                  4. The script must be run from the same node where the pre-upgrade script was run.

                  Location:

                  post-upgrade-script-for-quota.sh can be found under the 'extras' directory of the source tree for glusterfs.

                  Invocation:

                  post-upgrade-script-for-quota.sh takes one command-line argument, which must be either the name of a volume on which quota is enabled, or 'all'.

                  In the first case, invoke post-upgrade-script-for-quota.sh from the shell for each volume with quota enabled, with the name of the volume passed as an argument in the command-line:

                  Example:

                  For a volume \"vol1\" on which quota is enabled, invoke the script in the following way:

                  [root@server1 extras]#./post-upgrade-script-for-quota.sh vol1\n

                  In the second case, the post-upgrade script picks on its own, the volumes on which quota is enabled, and executes the post-upgrade procedure on each one of them. In this case, invoke post-upgrade-script-for-quota.sh from the shell with 'all' passed as an argument in the command-line:

                  Example:

                  [root@server1 extras]#./post-upgrade-script-for-quota.sh all\n

                  Note:

                  In the second case, post-upgrade-script-for-quota.sh exits prematurely upon failure to upgrade any given volume. In that case, you may run post-upgrade-script-for-quota.sh individually (using the volume name as command line argument) on this volume and also on all quota-enabled volumes appearing after it in the output of `gluster volume list`.

                  The backed up files under /var/tmp/glusterfs/quota-config-backup/ are retained after the post-upgrade procedure for reference.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.6/#upgrade-steps-for-geo-replication","title":"Upgrade steps for geo replication:","text":"

                  Here are the steps to upgrade your existing geo-replication setup to the new distributed geo-replication in glusterfs-3.5. The new version leverages all the nodes in your master volume and provides better performance.

                  Note:

                  Since the new version of geo-rep is very different from the older one, this has to be done offline.

                  New version supports only syncing between two gluster volumes via ssh+gluster.

                  This document deals with upgrading geo-rep; upgrading the volumes themselves is not covered in detail here.

                  Below are the steps to upgrade:

                  \u200b1. Stop the geo-replication session in older version ( \\< 3.5) using the below command

                      # gluster volume geo-replication `<master_vol>` `<slave_host>`::`<slave_vol>` stop\n

                  \u200b2. Now since the new geo-replication requires gfids of master and slave volume to be same, generate a file containing the gfids of all the files in master

                      # cd /usr/share/glusterfs/scripts/ ;\n    # bash generate-gfid-file.sh localhost:`<master_vol>` $PWD/get-gfid.sh    /tmp/master_gfid_file.txt ;\n    # scp /tmp/master_gfid_file.txt root@`<slave_host>`:/tmp\n

                  \u200b3. Now go to the slave host and apply the gfid to the slave volume.

                      # cd /usr/share/glusterfs/scripts/\n    # bash slave-upgrade.sh localhost:`<slave_vol>` /tmp/master_gfid_file.txt    $PWD/gsync-sync-gfid\n

                  This will ask you for the password of all the nodes in the slave cluster. Please provide them, if asked.

                  \u200b4. Also note that this will restart your slave gluster volume (stop and start)

                  \u200b5. Now create and start the geo-rep session between master and slave. For instructions on creating a new geo-rep session, please refer to the distributed-geo-rep admin guide.

                      # gluster volume geo-replication `<master_volume>` `<slave_host>`::`<slave_volume>` create push-pem force\n    # gluster volume geo-replication `<master_volume>` `<slave_host>`::`<slave_volume>` start\n

                  \u200b6. Now your session is upgraded to use distributed-geo-rep

                  "},{"location":"Upgrade-Guide/upgrade-to-3.7/","title":"GlusterFS upgrade to 3.7.x","text":"

                  Now that GlusterFS 3.7.0 is out, here is the process to upgrade from earlier installed versions of GlusterFS. Please read the entire howto before proceeding with an upgrade of your deployment.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.7/#pre-upgrade","title":"Pre-upgrade","text":"

                  GlusterFS contains the afrv2 implementation by default from 3.6.0 onwards. If you are using GlusterFS replication ( \\< 3.6) in your setup, please note that the new afrv2 implementation is only compatible with 3.6 or greater GlusterFS clients. If you are not updating your clients to GlusterFS version 3.6 along with your servers, you would need to disable the client self-heal process before the upgrade. You can do this with the commands below.

                  # gluster v set testvol cluster.entry-self-heal off\nvolume set: success\n\n# gluster v set testvol cluster.data-self-heal off\nvolume set: success\n\n# gluster v set testvol cluster.metadata-self-heal off\nvolume set: success\n
                  "},{"location":"Upgrade-Guide/upgrade-to-3.7/#glusterfs-upgrade-to-37x_1","title":"GlusterFS upgrade to 3.7.x","text":"

                  a) Scheduling a downtime

                  For this approach, schedule a downtime and prevent all your clients from accessing the servers (umount your volumes, stop gluster volumes, etc.).

                  1. Stop all glusterd, glusterfsd and glusterfs processes on your server.
                  2. Install GlusterFS 3.7.0
                  3. Start glusterd.
                  4. Ensure that all started volumes have processes online in \u201cgluster volume status\u201d.

                  You would need to repeat these steps on all servers that form your trusted storage pool.

                  After upgrading the servers, it is recommended to upgrade all client installations to 3.7.0.

                  b) Rolling Upgrade

                  If you have replicated or distributed replicated volumes with bricks placed in the right fashion for redundancy, have no data to be self-healed and feel adventurous, you can perform a rolling upgrade through the following procedure:

                  1. Stop all glusterd, glusterfs and glusterfsd processes on your server.
                  2. Install GlusterFS 3.7.0.
                  3. Start glusterd.
                  4. Run \u201cgluster volume heal <volname> info\u201d on all volumes and ensure that there is nothing left to be self-healed on every volume. If you have pending data for self-heal, run \u201cgluster volume heal <volname>\u201d and wait for self-heal to complete.
                  5. Ensure that all started volumes have processes online in \u201cgluster volume status\u201d.

                  Repeat the above steps on all servers that are part of your trusted storage pool.

                  Again after upgrading the servers, it is recommended to upgrade all client installations to 3.7.0.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.7/#special-notes-for-upgrading-from-34x-to-37x","title":"Special notes for upgrading from 3.4.x to 3.7.X","text":"

                  If you have quota or geo-replication configured in 3.4.x, please read below. Else you can skip this section.

                  Architectural changes in Quota & geo-replication were introduced in Gluster 3.5.0. Hence scheduling a downtime is recommended for upgrading from 3.4.x to 3.7.x if you have these features enabled.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.7/#upgrade-steps-for-quota","title":"Upgrade Steps For Quota","text":"

                  The upgrade process for quota involves the following:

                  1. Run pre-upgrade-script-for-quota.sh
                  2. Upgrade to 3.7.0
                  3. Run post-upgrade-script-for-quota.sh

                  More details on the scripts are as under.

                  Pre-Upgrade Script:

                  What it does:

                  The pre-upgrade script (pre-upgrade-script-for-quota.sh) iterates over the list of volumes that have quota enabled and captures the configured quota limits for each such volume in a file under /var/tmp/glusterfs/quota-config-backup/vol_\\<VOLNAME> by executing 'quota list' command on each one of them.

                  Pre-requisites for running Pre-Upgrade Script:

                  1. Make sure glusterd and the brick processes are running on all nodes in the cluster.
                  2. The pre-upgrade script must be run prior to the upgrade.
                  3. The pre-upgrade script must be run on only one of the nodes in the cluster.

                  Location:

                  pre-upgrade-script-for-quota.sh must be retrieved from the source tree under the 'extras' directory.

                  Invocation:

                  Invoke the script by executing `./pre-upgrade-script-for-quota.sh` from the shell on any one of the nodes in the cluster.

                  Example:

                  [root@server1 extras]#./pre-upgrade-script-for-quota.sh\n

                  Post-Upgrade Script:

                  What it does:

                  The post-upgrade script (post-upgrade-script-for-quota.sh) picks the volumes that have quota enabled.

                  Because the cluster must be operating at op-version 3 for quota to work, the 'default-soft-limit' for each of these volumes is set to 80% (which is its default value) via `volume set` operation as an explicit trigger to bump up the op-version of the cluster and also to trigger a re-write of volfiles which knocks quota off client volume file.

                  Once this is done, these volumes are started forcefully using `volume start force` to launch the Quota Daemon on all the nodes.

                  Thereafter, for each of these volumes, the paths and the limits configured on them are retrieved from the backed up file /var/tmp/glusterfs/quota-config-backup/vol_\\<VOLNAME> and limits are set on them via the `quota limit-usage` interface.

                  Note:

                  In the new version of quota, the command `quota limit-usage` will fail if the directory on which quota limit is to be set for a given volume does not exist. Therefore, it is advised that you create these directories first before running post-upgrade-script-for-quota.sh if you want limits to be set on these directories.

                  Pre-requisites for running Post-Upgrade Script:

                  1. The post-upgrade script must be executed after all the nodes in the cluster have upgraded.
                  2. Also, all the clients accessing the given volume must also be upgraded before the script is run.
                  3. Make sure glusterd and the brick processes are running on all nodes in the cluster post upgrade.
                  4. The script must be run from the same node where the pre-upgrade script was run.

                  Location:

                  post-upgrade-script-for-quota.sh can be found under the 'extras' directory of the source tree for glusterfs.

                  Invocation:

                  post-upgrade-script-for-quota.sh takes one command-line argument, which can be either the name of a volume that has quota enabled, or 'all'.

                  In the first case, invoke post-upgrade-script-for-quota.sh from the shell for each volume with quota enabled, with the name of the volume passed as an argument in the command-line:

                  Example: For a volume \"vol1\" on which quota is enabled, invoke the script in the following way:

                  [root@server1 extras]#./post-upgrade-script-for-quota.sh vol1\n

                  In the second case, the post-upgrade script itself picks the volumes on which quota is enabled and executes the post-upgrade procedure on each one of them. In this case, invoke post-upgrade-script-for-quota.sh from the shell with 'all' passed as the command-line argument:

                  Example:

                  [root@server1 extras]#./post-upgrade-script-for-quota.sh all\n

                  Note:

                  In the second case, post-upgrade-script-for-quota.sh exits prematurely if it fails to upgrade any given volume. In that case, you may run post-upgrade-script-for-quota.sh individually (with the volume name as the command-line argument) on that volume, and also on all quota-enabled volumes appearing after it in the output of `gluster volume list`.

                  The backed up files under /var/tmp/glusterfs/quota-config-backup/ are retained after the post-upgrade procedure for reference.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.7/#upgrade-steps-for-geo-replication","title":"Upgrade steps for geo replication:","text":"

                  The new version supports syncing only between two gluster volumes via ssh+gluster.

                  Below are the steps to upgrade.

                  1. Stop the geo-replication session in the older version (\< 3.5) using the command below:

                      # gluster volume geo-replication <master_vol> <slave_host>::<slave_vol> stop\n

                  2. Since the new geo-replication requires the GFIDs of the master and slave volumes to be the same, generate a file containing the GFIDs of all the files in the master volume:

                      # cd /usr/share/glusterfs/scripts/ ;\n    # bash generate-gfid-file.sh localhost:<master_vol> $PWD/get-gfid.sh    /tmp/master_gfid_file.txt ;\n    # scp /tmp/master_gfid_file.txt root@<slave_host>:/tmp\n

                  3. Upgrade the slave cluster installation to 3.7.0.

                  4. Now go to the slave host and apply the GFIDs to the slave volume:

                      # cd /usr/share/glusterfs/scripts/\n    # bash slave-upgrade.sh localhost:<slave_vol> /tmp/master_gfid_file.txt    $PWD/gsync-sync-gfid\n

                  This will prompt for the passwords of all the nodes in the slave cluster; provide them if asked. Also note that this will restart the slave gluster volume (stop and start).

                  5. Upgrade the master cluster to 3.7.0.

                  6. Now create and start the geo-replication session between the master and slave. For instructions on creating a new geo-replication session, please refer to the distributed geo-replication chapter in the Administrator Guide.

                      # gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> create push-pem force\n    # gluster volume geo-replication <master_volume> <slave_host>::<slave_volume> start\n

                  At this point, your distributed geo-replication should be configured appropriately.

                  "},{"location":"Upgrade-Guide/upgrade-to-3.8/","title":"Upgrade to 3.8","text":""},{"location":"Upgrade-Guide/upgrade-to-3.8/#upgrade-procedure-from-gluster-37x","title":"Upgrade procedure from Gluster 3.7.x","text":""},{"location":"Upgrade-Guide/upgrade-to-3.8/#pre-upgrade-notes","title":"Pre-upgrade Notes","text":"
                  • Online upgrade is only possible with replicated and distributed replicate volumes.
                  • Online upgrade is not yet supported for dispersed or distributed dispersed volumes.
                  • Ensure no configuration changes are done during the upgrade.
                  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master.
                  • Upgrading the servers ahead of the clients is recommended.
                  • Upgrade the clients after the servers are upgraded. It is recommended to have the same client and server major versions.
                  "},{"location":"Upgrade-Guide/upgrade-to-3.8/#online-upgrade-procedure-for-servers","title":"Online Upgrade Procedure for Servers","text":"

                  The procedure involves upgrading one server at a time. On every storage server in your trusted storage pool:

                  • Stop all gluster services, using the command below or any other preferred method.

                    killall glusterfs glusterfsd glusterd\n
                  • If you are using gfapi based applications (qemu, NFS-Ganesha, Samba etc.) on the servers, please stop those applications too.

                  • Install Gluster 3.8

                  • Ensure that version reflects 3.8.x in the output of

                    gluster --version\n
                  • Start glusterd on the upgraded server

                    glusterd\n
                  • Ensure that all gluster processes are online by executing

                    gluster volume status\n
                  • Self-heal all gluster volumes by running

                    for i in `gluster volume list`; do gluster volume heal $i; done\n
                  • Ensure that there is no heal backlog by running the below command for all volumes

                    gluster volume heal <volname> info\n
                  • Restart any gfapi based application stopped previously.

                  • After the upgrade is complete on all servers, run the following command:

                    gluster volume set all cluster.op-version 30800\n
                  "},{"location":"Upgrade-Guide/upgrade-to-3.8/#offline-upgrade-procedure","title":"Offline Upgrade Procedure","text":"

                  For this procedure, schedule a downtime and prevent all your clients from accessing the servers.

                  On every storage server in your trusted storage pool:

                  • Stop all gluster services, using the command below or any other preferred method.

                    killall glusterfs glusterfsd glusterd\n
                  • If you are using gfapi based applications (qemu, NFS-Ganesha, Samba etc.) on the servers, please stop those applications too.

                  • Install Gluster 3.8

                  • Ensure that version reflects 3.8.x in the output of

                    gluster --version\n
                  • Start glusterd on the upgraded server

                    glusterd\n
                  • Ensure that all gluster processes are online by executing

                    gluster volume status\n
                  • Restart any gfapi based application stopped previously.

                  • After the upgrade is complete on all servers, run the following command:

                    gluster volume set all cluster.op-version 30800\n
                  "},{"location":"Upgrade-Guide/upgrade-to-3.8/#upgrade-procedure-for-clients","title":"Upgrade Procedure for Clients","text":"
                  • Unmount all glusterfs mount points on the client
                  • Stop applications using gfapi (qemu etc.)
                  • Install Gluster 3.8
                  • Mount all gluster shares
                  • Start applications using libgfapi that were stopped previously
                  "},{"location":"Upgrade-Guide/upgrade-to-3.9/","title":"Upgrade to 3.9","text":""},{"location":"Upgrade-Guide/upgrade-to-3.9/#upgrade-procedure-from-gluster-38x-and-37x","title":"Upgrade procedure from Gluster 3.8.x and 3.7.x","text":"

                  The steps to upgrade to Gluster 3.9 are the same as for upgrading to Gluster 3.8. Please follow the detailed instructions from the 3.8 upgrade guide.

                  Note that there is only a single difference, related to the op-version:

                  After the upgrade is complete on all servers, run the following command:

                  gluster volume set all cluster.op-version 30900\n
                  "},{"location":"Upgrade-Guide/upgrade-to-4.0/","title":"Upgrade to 4.0","text":""},{"location":"Upgrade-Guide/upgrade-to-4.0/#upgrade-procedure-to-gluster-40-from-gluster-313x-312x-and-310x","title":"Upgrade procedure to Gluster 4.0, from Gluster 3.13.x, 3.12.x, and 3.10.x","text":"

                  NOTE: The upgrade procedure remains the same as for the 3.12 and 3.10 releases.

                  "},{"location":"Upgrade-Guide/upgrade-to-4.0/#pre-upgrade-notes","title":"Pre-upgrade notes","text":"
                  • Online upgrade is only possible with replicated and distributed replicate volumes
                  • Online upgrade is not supported for dispersed or distributed dispersed volumes
                  • Ensure no configuration changes are done during the upgrade
                  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
                  • Upgrading the servers ahead of the clients is recommended
                  • It is recommended to eventually run the same major version on clients and servers
                  "},{"location":"Upgrade-Guide/upgrade-to-4.0/#online-upgrade-procedure-for-servers","title":"Online upgrade procedure for servers","text":"

                  This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not hosted on the same server in the trusted storage pool.

                  ALERT: If any volume in the trusted storage pool being upgraded uses disperse or is a pure distributed volume, this procedure is NOT recommended; use the offline upgrade procedure instead.

                  "},{"location":"Upgrade-Guide/upgrade-to-4.0/#repeat-the-following-steps-on-each-server-in-the-trusted-storage-pool-to-upgrade-the-entire-pool-to-40-version","title":"Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 4.0 version:","text":"
                  1. Stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

                  3. Install Gluster 4.0

                  4. Ensure that version reflects 4.0.x in the output of,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on the upgraded server

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. Self-heal all gluster volumes by running

                    for i in `gluster volume list`; do gluster volume heal $i; done\n
                  8. Ensure that there is no heal backlog by running the below command for all volumes

                    gluster volume heal <volname> info\n

                    NOTE: If there is a heal backlog, wait till the backlog is empty, or the backlog does not have any entries needing a sync to the just upgraded server, before proceeding to upgrade the next server in the pool

                  9. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-4.0/#offline-upgrade-procedure","title":"Offline upgrade procedure","text":"

                  This procedure involves cluster downtime; during the upgrade window, clients are not allowed to access the volumes.

                  "},{"location":"Upgrade-Guide/upgrade-to-4.0/#steps-to-perform-an-offline-upgrade","title":"Steps to perform an offline upgrade:","text":"
                  1. On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\n
                  2. Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

                  3. Install Gluster 4.0, on all servers

                  4. Ensure that version reflects 4.0.x in the output of the following command on all servers,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on all the upgraded servers

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-4.0/#post-upgrade-steps","title":"Post upgrade steps","text":"

                  Perform the following steps after upgrading the entire trusted storage pool:

                  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
                  • Proceed to upgrade the clients to version 4.0 as well
                  • After upgrading the clients, for replicate volumes, it is recommended to enable the option gluster volume set <volname> fips-mode-rchecksum on to turn off the use of MD5 checksums during healing. This allows running Gluster on FIPS-compliant systems (a command sketch follows this list).
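
                  A minimal, hedged sketch of these post-upgrade commands is shown below; the op-version value 40000 (assumed here to correspond to release 4.0) and the volume name are placeholders to adapt to your cluster:

                  gluster volume get all cluster.max-op-version        # shows the highest op-version the upgraded cluster supports\ngluster volume set all cluster.op-version 40000      # bump the cluster op-version (40000 assumed for release 4.0; confirm against the value above)\ngluster volume set <volname> fips-mode-rchecksum on  # per replicate volume, once all clients are upgraded\n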
                  "},{"location":"Upgrade-Guide/upgrade-to-4.0/#upgrade-procedure-for-clients","title":"Upgrade procedure for clients","text":"

                  Following are the steps to upgrade clients to the 4.0.x version,

                  NOTE: x is the minor release number for the release

                  1. Unmount all glusterfs mount points on the client
                  2. Stop all applications that access the volumes via gfapi (qemu, etc.)
                  3. Install Gluster 4.0
                  4. Mount all gluster shares
                  5. Start any applications that were stopped previously in step (2)
                  "},{"location":"Upgrade-Guide/upgrade-to-4.1/","title":"Upgrade to 4.1","text":""},{"location":"Upgrade-Guide/upgrade-to-4.1/#upgrade-procedure-to-gluster-41-from-gluster-40x-312x-and-310x","title":"Upgrade procedure to Gluster 4.1, from Gluster 4.0.x, 3.12.x, and 3.10.x","text":"

                  NOTE: The upgrade procedure remains the same as for the 3.12 and 3.10 releases.

                  "},{"location":"Upgrade-Guide/upgrade-to-4.1/#pre-upgrade-notes","title":"Pre-upgrade notes","text":"
                  • Online upgrade is only possible with replicated and distributed replicate volumes
                  • Online upgrade is not supported for dispersed or distributed dispersed volumes
                  • Ensure no configuration changes are done during the upgrade
                  • If you are using geo-replication, please upgrade the slave cluster(s) before upgrading the master
                  • Upgrading the servers ahead of the clients is recommended
                  • It is recommended to eventually run the same major version on clients and servers
                  "},{"location":"Upgrade-Guide/upgrade-to-4.1/#online-upgrade-procedure-for-servers","title":"Online upgrade procedure for servers","text":"

                  This procedure involves upgrading one server at a time, while keeping the volume(s) online and client IO ongoing. This procedure assumes that multiple replicas of a replica set are not hosted on the same server in the trusted storage pool.

                  ALERT: If there are disperse or pure distributed volumes in the storage pool being upgraded, this procedure is NOT recommended; use the offline upgrade procedure instead.

                  "},{"location":"Upgrade-Guide/upgrade-to-4.1/#repeat-the-following-steps-on-each-server-in-the-trusted-storage-pool-to-upgrade-the-entire-pool-to-41-version","title":"Repeat the following steps, on each server in the trusted storage pool, to upgrade the entire pool to 4.1 version:","text":"
                  1. Stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd\nsystemctl stop glustereventsd\n
                  2. Stop all applications that run on this server and access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.)

                  3. Install Gluster 4.1

                  4. Ensure that version reflects 4.1.x in the output of,

                    gluster --version\n

                  NOTE: x is the minor release number for the release

                  5. Start glusterd on the upgraded server

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. If the glustereventsd service was previously enabled, start it using the command below, or through other means,

                    systemctl start glustereventsd\n
                  8. Invoke self-heal on all the gluster volumes by running,

                    for i in `gluster volume list`; do gluster volume heal $i; done\n
                  9. Verify that there is no heal backlog by running the command below for all the volumes,

                    gluster volume heal <volname> info\n

                  NOTE: Before proceeding to upgrade the next server in the pool, check the heal backlog. If there is a backlog, wait until it is empty, or until it no longer contains any entries requiring a sync to the just-upgraded server.

                  10. Restart any gfapi based application stopped previously in step (2)
                  "},{"location":"Upgrade-Guide/upgrade-to-4.1/#offline-upgrade-procedure","title":"Offline upgrade procedure","text":"

                  This procedure involves cluster downtime; during the upgrade window, clients are not allowed to access the volumes.

                  "},{"location":"Upgrade-Guide/upgrade-to-4.1/#steps-to-perform-an-offline-upgrade","title":"Steps to perform an offline upgrade:","text":"
                  1. On every server in the trusted storage pool, stop all gluster services, either using the command below, or through other means,

                    killall glusterfs glusterfsd glusterd glustereventsd\nsystemctl stop glustereventsd\n
                  2. Stop all applications that access the volumes via gfapi (qemu, NFS-Ganesha, Samba, etc.), across all servers

                  3. Install Gluster 4.1, on all servers

                  4. Ensure that version reflects 4.1.x in the output of the following command on all servers,

                    gluster --version\n

                    NOTE: x is the minor release number for the release

                  5. Start glusterd on all the upgraded servers

                    glusterd\n
                  6. Ensure that all gluster processes are online by checking the output of,

                    gluster volume status\n
                  7. If the glustereventsd service was previously enabled, start it using the command below, or through other means,

                    systemctl start glustereventsd\n
                  8. Restart any gfapi based application stopped previously in step (2)

                  "},{"location":"Upgrade-Guide/upgrade-to-4.1/#post-upgrade-steps","title":"Post upgrade steps","text":"

                  Perform the following steps after upgrading the entire trusted storage pool:

                  • It is recommended to update the op-version of the cluster. Refer to the op-version section for further details
                  • Proceed to upgrade the clients to version 4.1 as well
                  • After upgrading the clients, for replicate volumes, it is recommended to enable the option gluster volume set <volname> fips-mode-rchecksum on to turn off the use of MD5 checksums during healing. This allows running Gluster on FIPS-compliant systems.
                  "},{"location":"Upgrade-Guide/upgrade-to-4.1/#upgrade-procedure-for-clients","title":"Upgrade procedure for clients","text":"

                  Following are the steps to upgrade clients to the 4.1.x version,

                  NOTE: x is the minor release number for the release

                  1. Unmount all glusterfs mount points on the client
                  2. Stop all applications that access the volumes via gfapi (qemu, etc.)
                  3. Install Gluster 4.1
                  4. Mount all gluster shares
                  5. Start any applications that were stopped previously in step (2)
                  "},{"location":"Upgrade-Guide/upgrade-to-5/","title":"Upgrade to 5","text":""},{"location":"Upgrade-Guide/upgrade-to-5/#upgrade-procedure-to-gluster-5-from-gluster-41x-40x-312x-and-310x","title":"Upgrade procedure to Gluster 5, from Gluster 4.1.x, 4.0.x, 3.12.x and 3.10.x","text":"

                  NOTE: The upgrade procedure remains the same as for the 4.1 release.

                  Refer to the Upgrading to 4.1 guide and follow the documented instructions, substituting 5 wherever 4.1 appears as the version reference.

                  "},{"location":"Upgrade-Guide/upgrade-to-5/#major-issues","title":"Major issues","text":"
                  1. The following options have been removed from the code base and must be unset before an upgrade from releases older than release 4.1.0,

                    • features.lock-heal
                    • features.grace-timeout

                  To check if these options are set, use

                  gluster volume info\n

                  and ensure that the above options do not appear in the Options Reconfigured: section of the output for any volume in the cluster.
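
                  To scan every volume in one pass, a minimal shell loop such as the one below can be used (a sketch, assuming a bash shell on a node with the gluster CLI):

                  for v in `gluster volume list`; do gluster volume info $v | grep -E 'features.(lock-heal|grace-timeout)'; done\n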

                  If they are set, unset them using the following command,

                  # gluster volume reset <volname> <option>\n

                  NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults before upgrading the cluster.

                  "},{"location":"Upgrade-Guide/upgrade-to-6/","title":"Upgrade to 6","text":""},{"location":"Upgrade-Guide/upgrade-to-6/#upgrade-procedure-to-gluster-6-from-gluster-5x-41x-and-312x","title":"Upgrade procedure to Gluster 6, from Gluster 5.x, 4.1.x, and 3.12.x","text":"

                  We recommend reading the release notes for 6.0 to be aware of the features and fixes provided with the release.

                  NOTE: The upgrade procedure remains the same as for the 4.1.x release.

                  Refer to the Upgrading to 4.1 guide and follow the documented instructions, substituting 6 wherever 4.1 appears as the version reference.

                  "},{"location":"Upgrade-Guide/upgrade-to-6/#major-issues","title":"Major issues","text":"
                  1. The following options have been removed from the code base and must be unset before an upgrade from releases older than release 4.1.0,

                    • features.lock-heal
                    • features.grace-timeout

                  To check if these options are set, use

                  gluster volume info\n

                  and ensure that the above options do not appear in the Options Reconfigured: section of the output for any volume in the cluster.

                  If they are set, unset them using the following command,

                  # gluster volume reset <volname> <option>\n

                  NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults before upgrading the cluster.

                  "},{"location":"Upgrade-Guide/upgrade-to-6/#deprecated-translators-and-upgrade-procedure-for-volumes-using-these-features","title":"Deprecated translators and upgrade procedure for volumes using these features","text":"

                  With this release of Gluster, the following xlators/features are deprecated and are not available in the distribution-specific packages. If any of these xlators or features are in use, refer to the instructions below on the steps needed before upgrading to this release.

                  "},{"location":"Upgrade-Guide/upgrade-to-6/#stripe-volume","title":"Stripe volume","text":"

                  The stripe xlator provided the ability to stripe data across bricks. This functionality was used to create and support files larger than a single brick, and also to provide better disk utilization for large file IO by spreading the IO blocks across bricks and hence across physical disks.

                  This functionality is now provided by the shard xlator.

                  There is no in-place upgrade feasible for volumes using the stripe feature; users are encouraged to migrate their data from existing stripe-based volumes to sharded volumes.
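
                  As a hedged sketch of the target configuration, sharding is enabled on the replacement (non-stripe) volume with the options below; the volume name is a placeholder and 64MB is simply the default shard block size:

                  gluster volume set <NEWVOLNAME> features.shard on\ngluster volume set <NEWVOLNAME> features.shard-block-size 64MB\n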

                  "},{"location":"Upgrade-Guide/upgrade-to-6/#tier-volume","title":"Tier volume","text":"

                  The tier feature is no longer supported with this release, and there is no replacement for it.

                  Volumes using the existing Tier feature need to be converted to regular volumes before upgrading to this release.

                  Command reference:

                  volume tier <VOLNAME> detach <start|stop|status|commit|[force]>\n
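
                  For example, a typical detach sequence (run before the upgrade, on a release that still ships the tier xlator) might look like the hedged sketch below; wait for the status to report completion before committing:

                  gluster volume tier <VOLNAME> detach start\ngluster volume tier <VOLNAME> detach status\ngluster volume tier <VOLNAME> detach commit\n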
                  "},{"location":"Upgrade-Guide/upgrade-to-6/#other-miscellaneous-features","title":"Other miscellaneous features","text":"
                  • BD xlator
                  • glupy

                  The above translators were not supported in previous versions either, but users had the option to create volumes using these features. If such volumes are in use, the data needs to be migrated into a new volume that does not use the feature before upgrading the cluster.

                  "},{"location":"Upgrade-Guide/upgrade-to-7/","title":"Upgrade to 7","text":""},{"location":"Upgrade-Guide/upgrade-to-7/#upgrade-procedure-to-gluster-7-from-gluster-6x-5x-41x-and-312x","title":"Upgrade procedure to Gluster 7, from Gluster 6.x, 5.x, 4.1.x, and 3.12.x","text":"

                  We recommend reading the release notes for 7.0 to be aware of the features and fixes provided with the release.

                  NOTE: The upgrade procedure remains the same as for the 4.1.x release.

                  Refer to the Upgrading to 4.1 guide and follow the documented instructions, substituting 7 wherever 4.1 appears as the version reference.

                  NOTE: If you have ever enabled quota on your volumes, then after the upgrade is complete you will have to restart all the nodes in the cluster, one by one, in order to fix the checksum values in the quota.cksum file under the /var/lib/glusterd/vols/<volname>/ directory. The peers may go into the Peer rejected state while doing so, but once all the nodes have been restarted everything will be back to normal.
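
                  One hedged way to carry out that rolling restart on a systemd-based distribution is sketched below; restart one node, confirm the peers are connected again, then move on to the next node:

                  systemctl restart glusterd    # run on one node at a time\ngluster peer status           # wait until all peers show State: Peer in Cluster (Connected) before restarting the next node\n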

                  "},{"location":"Upgrade-Guide/upgrade-to-7/#major-issues","title":"Major issues","text":"
                  1. The following options have been removed from the code base and must be unset before an upgrade from releases older than release 4.1.0,

                    • features.lock-heal
                    • features.grace-timeout

                  To check if these options are set, use

                  gluster volume info\n

                  and ensure that the above options do not appear in the Options Reconfigured: section of the output for any volume in the cluster.

                  If they are set, unset them using the following command,

                  # gluster volume reset <volname> <option>\n

                  NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults before upgrading the cluster.

                  "},{"location":"Upgrade-Guide/upgrade-to-7/#deprecated-translators-and-upgrade-procedure-for-volumes-using-these-features","title":"Deprecated translators and upgrade procedure for volumes using these features","text":"

                  If you are upgrading from a release prior to release 6, be aware of the deprecated xlators and functionality.

                  "},{"location":"Upgrade-Guide/upgrade-to-8/","title":"Upgrade procedure to Gluster 8, from Gluster 7.x, 6.x and 5.x","text":"

                  We recommend reading the release notes for 8.0 to be aware of the features and fixes provided with the release.

                  NOTE: Before following the generic upgrade procedure, check the \"Major Issues\" section given below.

                  With version 8, certain changes have been introduced to the directory structure of changelog files in gluster geo-replication. Thus, before upgrading the geo-replication packages, we need to execute the upgrade script with the brick path as an argument, as described below:

                  1. Stop the geo-replication session.
                  2. Run the upgrade script with the brick path as the argument. The script can be run in a loop for multiple bricks (a sketch of this flow follows the list).
                  3. Start the upgrade process. The script will update the existing changelog directory structure, and the paths inside the htime files, to the new format introduced in version 8. If the script is not executed, the search algorithm used during the history crawl will return wrong results when upgrading from version 7 or below to version 8 or above.
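
                  A rough, hedged sketch of that flow is shown below; the volume names, brick paths, and the changelog upgrade script name are placeholders to substitute with your actual values:

                  gluster volume geo-replication <master_vol> <slave_host>::<slave_vol> stop\nfor brick in <brick_path_1> <brick_path_2>; do ./<changelog-upgrade-script> $brick; done\n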

                  Refer to the generic upgrade procedure guide and follow the documented instructions.

                  "},{"location":"Upgrade-Guide/upgrade-to-8/#major-issues","title":"Major issues","text":""},{"location":"Upgrade-Guide/upgrade-to-8/#the-following-options-are-removed-from-the-code-base-and-require-to-be-unset","title":"The following options are removed from the code base and require to be unset","text":"

                  before an upgrade from releases older than release 4.1.0,

                  - features.lock-heal\n- features.grace-timeout\n

                  To check if these options are set, use

                  gluster volume info\n

                  and ensure that the above options do not appear in the Options Reconfigured: section of the output for any volume in the cluster.

                  If they are set, unset them using the following command,

                  # gluster volume reset <volname> <option>\n
                  "},{"location":"Upgrade-Guide/upgrade-to-8/#make-sure-you-are-not-using-any-of-the-following-depricated-features","title":"Make sure you are not using any of the following depricated features :","text":"
                  - Block device (bd) xlator\n- Decompounder feature\n- Crypt xlator\n- Symlink-cache xlator\n- Stripe feature\n- Tiering support (tier xlator and changetimerecorder)\n- Glupy\n
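
                  One hedged way to spot volumes still using a deprecated volume type is to scan the Type field that gluster volume info reports for every volume, for example:

                  for v in `gluster volume list`; do gluster volume info $v | grep -E '^(Volume Name|Type):'; done\n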

                  NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults before upgrading the cluster.

                  "},{"location":"Upgrade-Guide/upgrade-to-8/#deprecated-translators-and-upgrade-procedure-for-volumes-using-these-features","title":"Deprecated translators and upgrade procedure for volumes using these features","text":"

                  If you are upgrading from a release prior to release 6, be aware of the deprecated xlators and functionality.

                  "},{"location":"Upgrade-Guide/upgrade-to-9/","title":"Upgrade procedure to Gluster 9, from Gluster 8.x, 7.x and 6.x","text":"

                  We recommend reading the release notes for 9.0 to be aware of the features and fixes provided with the release.

                  NOTE: Before following the generic upgrade procedure, check the \"Major Issues\" section given below.

                  Refer to the generic upgrade procedure guide and follow the documented instructions.

                  "},{"location":"Upgrade-Guide/upgrade-to-9/#major-issues","title":"Major issues","text":""},{"location":"Upgrade-Guide/upgrade-to-9/#the-following-options-are-removed-from-the-code-base-and-require-to-be-unset","title":"The following options are removed from the code base and require to be unset","text":"

                  before an upgrade from releases older than release 4.1.0,

                  - features.lock-heal\n- features.grace-timeout\n

                  To check if these options are set, use

                  gluster volume info\n

                  and ensure that the above options do not appear in the Options Reconfigured: section of the output for any volume in the cluster.

                  If they are set, unset them using the following command,

                  # gluster volume reset <volname> <option>\n
                  "},{"location":"Upgrade-Guide/upgrade-to-9/#make-sure-you-are-not-using-any-of-the-following-deprecated-features","title":"Make sure you are not using any of the following deprecated features :","text":"
                  - Block device (bd) xlator\n- Decompounder feature\n- Crypt xlator\n- Symlink-cache xlator\n- Stripe feature\n- Tiering support (tier xlator and changetimerecorder)\n- Glupy\n

                  NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults before upgrading the cluster.

                  "},{"location":"Upgrade-Guide/upgrade-to-9/#deprecated-translators-and-upgrade-procedure-for-volumes-using-these-features","title":"Deprecated translators and upgrade procedure for volumes using these features","text":"

                  If you are upgrading from a release prior to release 6, be aware of the deprecated xlators and functionality.

                  "},{"location":"presentations/","title":"Presentations","text":"

                  This is a collection of Gluster presentations from all over the world. We have a slideshare account where most of these presentations are stored.

                  "},{"location":"presentations/#fosdem-2020-brussels-belgium-1st-2nd-february-2020","title":"FOSDEM 2020 @ Brussels, Belgium - 1st & 2nd February 2020","text":"
                  • Evolution of path based Geo-replication in Gluster - Hari Gowtham
                  • A 'Thin Arbiter' for glusterfs replication - Ravishankar N
                  "},{"location":"presentations/#fosdem-2017-brussels-belgium-february-5-2017","title":"FOSDEM 2017 @ Brussels, Belgium - February 5, 2017","text":"
                  • GlusterD-2.0: The next generation of GlusterFS management - Kaushal M
                  • Gluster Roadmap and Features - Niels de Vos
                  • SELinux Support over GlusterFS - Jiffin Tony Thottan
                  • Hyper-converged, persistent storage for containers with GlusterFS - Jose Rivera, Mohamed Ashiq Liyazudeen
                  • Kubernetes+GlusterFS Lightning Ver. - Jose Rivera, Mohamed Ashiq Liyazudeen
                  "},{"location":"presentations/#pycon-india-2016-new-delhi-india-september-25-2016","title":"PyCon India 2016 @ New Delhi, India - September 25, 2016","text":"
                  • Python bindings to GlusterFS - a distributed filesystem - Prashanth Pai
                  "},{"location":"presentations/#openshift-meetup-india-2016-bangalore-india-june-11-2016","title":"Openshift Meetup India 2016 @Bangalore, India - June 11, 2016","text":"
                  • \u201cGlusterFS and Openshift (slideshare)\u201d - Humble Devassy Chirammal, Mohamed Ashiq Liyazudeen
                  "},{"location":"presentations/#glusterfs-meetup-bangalore-2016-bangalore-india-june-4-2016","title":"GlusterFS Meetup Bangalore 2016 @ Bangalore, India - June 4, 2016","text":"
                  • \u201cGlusterFS Containers (slideshare)\u201d - Humble Devassy Chirammal, Mohamed Ashiq Liyazudeen
                  • \"gdeploy 2.0 (slideshare)\" - Sachidananda Urs, Nandaja Varma
                  "},{"location":"presentations/#openstack-days-istanbul-2016-istanbul-turkey-may-31-2016","title":"OpenStack Days Istanbul 2016 @ Istanbul, Turkey - May 31, 2016","text":"
                  • \u201cBuilding Clouds That Scale-Out with GlusterFS\u201d - Mustafa Resul CETINEL
                  "},{"location":"presentations/#nluug-voorjaarsconferentie-2016-bunnik-the-netherlands-may-26-2016","title":"NLUUG Voorjaarsconferentie 2016 @ Bunnik, The Netherlands - May 26, 2016","text":"
                  • Replication Techniques in Gluster (.odp) (.pdf) - Niels de Vos
                  "},{"location":"presentations/#vault-2016-raleigh-nc-us-apr-20-21-2016","title":"Vault 2016 @ Raleigh, NC, US - Apr 20-21, 2016","text":"
                  • GlusterD 2.0 (slideshare) - Atin Mukherjee
                  "},{"location":"presentations/#incontro-devops-italia-2016-bologna-italy-apr-1-2016","title":"Incontro DevOps Italia 2016 @ Bologna, Italy - Apr 1, 2016","text":"
                  • Gluster roadmap, recent improvements and upcoming features slideshare Vimeo recording - Niels de Vos
                  "},{"location":"presentations/#linuxconfau-2016-geelong-australia-feb-03-2016","title":"LinuxConfAU 2016 @ Geelong, Australia - Feb 03, 2016","text":"
                  • GlusterD thread synchronization using Userspace Read Copy Update (URCU) slideshare - Atin Mukherjee
                  "},{"location":"presentations/#devconfcz-2016-brno-czech-republic-february-5-2016","title":"DevConf.CZ 2016 @ Brno, Czech Republic - February 5, 2016","text":"
                  • Ceph, Gluster, Swift: Similarities and differences (https://speakerdeck.com/prashanthpai/ceph-gluster-swift-similarities-and-differences) - Prashanth Pai, Thiago da Silva
                  "},{"location":"presentations/#fosdem-2016-brussels-belgium-january-30-2016","title":"FOSDEM 2016 @ Brussels, Belgium - January 30, 2016","text":"
                  • Gluster roadmap: Recent improvements and upcoming features slideshare - Niels de Vos
                  "},{"location":"presentations/#t-dose-2015-eindhoven-the-netherlands-nov-28-2015","title":"T-DOSE 2015 @ Eindhoven, The Netherlands - Nov 28, 2015","text":"
                  • Introduction into Scale-out Storage with Gluster slideshare - Niels de Vos
                  "},{"location":"presentations/#usenix-lisa-2015-washington-dc-usa-nov-8-2015","title":"Usenix LISA 2015 @ Washington DC, USA - Nov 8, 2015","text":"
                  • GlusterFS Tutorial - Architecture - Rajesh Joseph & Poornima Gurusiddaiah
                  • GlusterFS Tutorial - Hands-on - Rajesh Joseph & Poornima Gurusiddaiah
                  "},{"location":"presentations/#open-source-backup-conference-cologne-germany-september-30-2015","title":"Open Source Backup Conference @ Cologne, Germany - September 30, 2015","text":"
                  • Scale-Out backups with Bareos and Gluster (slideshare) - Niels de Vos
                  "},{"location":"presentations/#2015-storage-developer-conference","title":"2015 Storage Developer Conference","text":"
                  • Achieving Coherent and Aggressive Client Caching in Gluster, a Distributed System pdf - Poornima Gurusiddaiah, Soumya Koduri

                  • Introduction to Highly Available NFS Server on Scale-Out Storage Systems Based on GlusterFS slideshare - Soumya Koduri, Meghana Madhusudhan

                  "},{"location":"presentations/#gluster-summit-2015-barcelona-spain","title":"Gluster Summit 2015 @ Barcelona, Spain","text":"
                  • Bug Triage in Gluster - Niels de Vos
                  • Responsibilities of Gluster Maintainers - Niels de Vos
                  • Leases and caching - Poornima Gurusiddaiah & Soumya Koduri
                  • Cache tiering in GlusterFS and future directions - Dan Lambright
                  • Yet Another Deduplication Library (YADL) - Dan Lambright
                  "},{"location":"presentations/#gluster-conference-nmamit-nitte-apr-11-2015","title":"Gluster Conference @ NMAMIT, Nitte - Apr 11, 2015","text":"
                  • Introduction to Open Source - Niels de Vos, Red Hat
                  • Software Defined Storage - Dan Lambright, Red Hat
                  • Introduction to GlusterFS - Kaleb S. KEITHLEY, Red Hat
                  • Data Deduplication - Joseph Fernandes, Red Hat
                  • Quality of Service - Karthik US & Sukumar Poojary, 4th SEM, MCA, NMAMIT, Nitte
                  "},{"location":"presentations/#ceph-gluster-fs-software-defined-storage-meetup-jan-22-2015","title":"Ceph & Gluster FS - Software Defined Storage Meetup - Jan 22, 2015","text":"
                  • GlusterFS - Current Features & Roadmap - Niels de Vos, Red Hat
                  "},{"location":"presentations/#open-source-storage-for-bigdata-fifth-elephant-event-jun-21-2014","title":"Open source storage for bigdata :Fifth Elephant event - Jun 21, 2014","text":"
                  • GlusterFS_Hadoop_fifth-elephant.odp - Lalatendu Mohanty, Red Hat
                  "},{"location":"presentations/#red-hat-summit-2014-san-francisco-california-usa-apr-14-17-2014","title":"Red Hat Summit 2014, San Francisco, California, USA - Apr 14-17, 2014","text":"
                  • Red Hat Storage Server Administration Deep Dive - slideshare - Dustin Black, Red Hat
                  • GlusterFS Stack Diagram
                  "},{"location":"presentations/#gluster-community-night-amsterdam-the-netherlands-mar-4th-2014","title":"Gluster Community Night, Amsterdam, The Netherlands - Mar 4th, 2014","text":"
                  • GlusterFS for System Administrators - Niels de Vos, Red Hat
                  "},{"location":"presentations/#gluster-community-day-london-united-kingdom-oct-29th-2013","title":"Gluster Community Day, London, United Kingdom - Oct 29th, 2013","text":"
                  • Developing Apps and Integrating with GlusterFS - Libgfapi.odp - Justin Clift, Red Hat
                  "},{"location":"presentations/#gluster-community-day-linuxcon-europ-2013-edinburgh-united-kingdom-oct-22-24-2013","title":"Gluster Community Day / LinuxCon Europ 2013, Edinburgh, United Kingdom - Oct 22-24, 2013","text":"
                  • GlusterFS Architecture & Roadmap - Vijay Bellur
                  • Integrating GlusterFS, qemu and oVirt - Vijay Bellur
                  "},{"location":"presentations/#gluster-community-day-stockholm-sweden-sep-4th-2013","title":"Gluster Community Day, Stockholm, Sweden - Sep 4th, 2013","text":"
                  • Gluster related development - Niels de Vos, Red Hat
                  "},{"location":"presentations/#loadays-belgium-april-6th-2013","title":"LOADays, Belgium - April 6th, 2013","text":"
                  • Glusterfs_for_sysadmins-justin_clift - GlusterFS for SysAdmins, Justin Clift. For LOADays 2013 conference.
                  "},{"location":"presentations/#cialug-des-moines-ia-march-21st-2013","title":"CIALUG Des Moines, IA - March 21st, 2013","text":"
                  • Converged infrastruture with oVirt, KVM, and Gluster - Theron Conrey, Red Hat
                  "},{"location":"presentations/#gluster-community-summit-bangalore-march-7-8-2013","title":"Gluster Community Summit, Bangalore - March 7 & 8, 2013","text":"
                  • SMB-GlusterDevMar2013 - Chris Hertel, Red Hat
                  • Video recording
                  • kkeithley-UFONFS-GlusterSummit - Kaleb Keithley, Red Hat
                  • HDFS + GlusterFS integration - Jay Vyas Video
                  • Jointhe_SuperColony-_Feb2013.odp - JMW, Red Hat
                  • GlusterFS API Introduction slideshare (Jeff Darcy, Red Hat)
                  "},{"location":"presentations/#gluster-community-workshop-at-cern-in-geneva-february-26-2013","title":"Gluster Community Workshop at CERN in Geneva - February 26, 2013","text":"
                  • Debugging GlusterFS with Wireshark (additional files), Niels de Vos
                  "},{"location":"presentations/#gluster-community-workshop-at-linuxcon-europe-november-8-2012","title":"Gluster Community Workshop at LinuxCon Europe - November 8, 2012","text":"
                  • Gluster for Sysadmins - Dustin Black, Red Hat
                  • On-demand_File_Caching_-_Gustavo_Brand - On-demand File Caching, Gustavo Brand, Scalus Project
                  • Gluster_Wireshark_Niels_de_Vos - Gluster and Wireshark Integration, Niels de Vos, Red Hat
                  • Accessing_Gluster_UFO_-_Eco_Willson - Unified File and Object with GlusterFS, Eco Willson, Red Hat
                  • Disperse_Xlator_Ramon_Datalab.pdf - Disperse Translator, Ramon, Datalab
                  • State_of_the_Gluster_-_LCEU.pdf - State of the Gluster Community, John Mark Walker, Red Hat
                  • QEMU_GlusterFS - QEMU integration with GlusterFS, Bharata Rao, IBM Linux Technology Center
                  "},{"location":"presentations/#software-developers-conference-snia-sep-17-2012","title":"Software Developers' Conference (SNIA) - Sep 17, 2012","text":"
                  • Challenges and Futures slideshare (Jeff Darcy, Red Hat)
                  "},{"location":"presentations/#gluster-workshop-at-linuxcon-north-america-aug-28-2012","title":"Gluster Workshop at LinuxCon North America - Aug 28, 2012","text":"
                  • Translator tutorial slideshare (Jeff Darcy, Red Hat)
                  • Translator example slideshare (Jeff Darcy, Red Hat)
                  "},{"location":"release-notes/","title":"index","text":"

                  Gluster releases are separated into major and minor releases. Major releases typically contain newer functionality (in addition to bug fixes), and minor releases improve the stability of a major release by providing bug fixes for issues found or reported against it.

                  Major releases are made once a year and receive minor updates for the next 12 months, after which they are no longer maintained (termed EOL, End-Of-Life).

                  NOTE:

                  From the Gluster 10 major release onwards, the release cycle for major releases has changed from 6 months to 1 year. Minor releases will follow every alternate month for a period of 12 months.

                  Likewise, minor releases of the previous major version will be made every three months.

                  Detailed release schedule here

                  "},{"location":"release-notes/#release-notes","title":"Release Notes","text":""},{"location":"release-notes/#glusterfs-selinux-release-notes","title":"GlusterFS seLinux release notes","text":"
                  • 2.0.1
                  "},{"location":"release-notes/#glusterfs-11-release-notes","title":"GlusterFS 11 release notes","text":"
                  • 11.1
                  • 11.0
                  "},{"location":"release-notes/#glusterfs-10-release-notes","title":"GlusterFS 10 release notes","text":"
                  • 10.4
                  • 10.3
                  • 10.2
                  • 10.1
                  • 10.0
                  "},{"location":"release-notes/#glusterfs-9-release-notes","title":"GlusterFS 9 release notes","text":"
                  • 9.6
                  • 9.5
                  • 9.4
                  • 9.3
                  • 9.2
                  • 9.1
                  • 9.0
                  "},{"location":"release-notes/#glusterfs-8-release-notes","title":"GlusterFS 8 release notes","text":"
                  • 8.6
                  • 8.5
                  • 8.4
                  • 8.3
                  • 8.2
                  • 8.1
                  • 8.0
                  "},{"location":"release-notes/#glusterfs-7-release-notes","title":"GlusterFS 7 release notes","text":"
                  • 7.9
                  • 7.8
                  • 7.7
                  • 7.6
                  • 7.5
                  • 7.4
                  • 7.3
                  • 7.2
                  • 7.1
                  • 7.0
                  "},{"location":"release-notes/#glusterfs-6-release-notes","title":"GlusterFS 6 release notes","text":"
                  • 6.10
                  • 6.9
                  • 6.8
                  • 6.7
                  • 6.6
                  • 6.5
                  • 6.4
                  • 6.3
                  • 6.2
                  • 6.1
                  • 6.0
                  "},{"location":"release-notes/#glusterfs-5-release-notes","title":"GlusterFS 5 release notes","text":"
                  • 5.13
                  • 5.12
                  • 5.11
                  • 5.10
                  • 5.9
                  • 5.8
                  • 5.6
                  • 5.5
                  • 5.3
                  • 5.2
                  • 5.1
                  • 5.0
                  "},{"location":"release-notes/#glusterfs-41-release-notes","title":"GlusterFS 4.1 release notes","text":"
                  • 4.1.10
                  • 4.1.9
                  • 4.1.8
                  • 4.1.7
                  • 4.1.6
                  • 4.1.5
                  • 4.1.4
                  • 4.1.3
                  • 4.1.2
                  • 4.1.1
                  • 4.1.0
                  "},{"location":"release-notes/#glusterfs-40-release-notes","title":"GlusterFS 4.0 release notes","text":"
                  • 4.0.2
                  • 4.0.1
                  • 4.0.0
                  "},{"location":"release-notes/#glusterfs-313-release-notes","title":"GlusterFS 3.13 release notes","text":"
                  • 3.13.2
                  • 3.13.1
                  • 3.13.0
                  "},{"location":"release-notes/#glusterfs-312-release-notes","title":"GlusterFS 3.12 release notes","text":"
                  • 3.12.15
                  • 3.12.14
                  • 3.12.13
                  • 3.12.12
                  • 3.12.11
                  • 3.12.10
                  • 3.12.9
                  • 3.12.8
                  • 3.12.7
                  • 3.12.6
                  • 3.12.5
                  • 3.12.4
                  • 3.12.3
                  • 3.12.2
                  • 3.12.1
                  • 3.12.0
                  "},{"location":"release-notes/#glusterfs-311-release-notes","title":"GlusterFS 3.11 release notes","text":"
                  • 3.11.3
                  • 3.11.2
                  • 3.11.1
                  • 3.11.0
                  "},{"location":"release-notes/#glusterfs-310-release-notes","title":"GlusterFS 3.10 release notes","text":"
                  • 3.10.12
                  • 3.10.11
                  • 3.10.10
                  • 3.10.9
                  • 3.10.8
                  • 3.10.7
                  • 3.10.6
                  • 3.10.5
                  • 3.10.4
                  • 3.10.3
                  • 3.10.2
                  • 3.10.1
                  • 3.10.0
                  "},{"location":"release-notes/#glusterfs-39-release-notes","title":"GlusterFS 3.9 release notes","text":"
                  • 3.9.0
                  "},{"location":"release-notes/#glusterfs-37-release-notes","title":"GlusterFS 3.7 release notes","text":"
                  • 3.7.1
                  • 3.7.0
                  "},{"location":"release-notes/#glusterfs-36-release-notes","title":"GlusterFS 3.6 release notes","text":"
                  • 3.6.3
                  • 3.6.0
                  "},{"location":"release-notes/#glusterfs-35-release-notes","title":"GlusterFS 3.5 release notes","text":"
                  • 3.5.4
                  • 3.5.3
                  • 3.5.2
                  • 3.5.1
                  • 3.5.0
                  "},{"location":"release-notes/10.0/","title":"Release notes for Gluster 10.0","text":"

                  Release date: 16-Nov-2021

                  This is a major release that includes a range of features, code improvements and stability fixes as noted below.

                  A selection of the key features and changes are documented in this page. A full list of bugs that have been addressed is included further below.

                  • Release notes for Gluster 10.0
                  • Announcements
                  • Builds are available at -
                  • Highlights
                  • Bugs addressed
                  "},{"location":"release-notes/10.0/#announcements","title":"Announcements","text":"
                  1. The release receiving maintenance updates after release 10 is release 9 (reference)
                  2. Release 10 will receive maintenance updates around the 15th of every alternate month, and release 9 will receive maintenance updates around the 15th every three months.
                  "},{"location":"release-notes/10.0/#builds-are-available-at-","title":"Builds are available at -","text":"

                  https://download.gluster.org/pub/gluster/glusterfs/10/10.0/

                  "},{"location":"release-notes/10.0/#highlights","title":"Highlights","text":"
                  • Major performance improvement of ~20% for both small-file and large-file workloads in controlled lab testing #2771

                  NOTE: The above improvement requires the tcmalloc library to be enabled at build time. We have tested and verified tcmalloc on x86_64 platforms, and it is enabled only for x86_64 builds in the current release.

                  • Randomized port selection for bricks, improves startup time #786
                  • Performance improvement with use of readdir instead of readdirp in fix-layout #2241
                  • Heal time improvement with bigger window size #2067
                  "},{"location":"release-notes/10.0/#bugs-addressed","title":"Bugs addressed","text":"

                  Bugs addressed since release 9 are listed below.

                  • #504 AFR: remove memcpy() + ntoh32() pattern
                  • #705 gf_backtrace_save inefficiencies
                  • #782 Do not explicitly call strerror(errnum) when logging
                  • #786 glusterd-pmap binds to 10K ports on startup (using IPv4)
                  • #904 [bug:1649037] Translators allocate too much memory in their xlator_
                  • #1000 [bug:1193929] GlusterFS can be improved
                  • #1002 [bug:1679998] GlusterFS can be improved
                  • #1052 [bug:1693692] Increase code coverage from regression tests
                  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                  • #1096 [bug:1622665] clang-scan report: glusterfs issues
                  • #1101 [bug:1813029] volume brick fails to come online because other proce
                  • #1251 performance: improve __afr_fd_ctx_get() function
                  • #1339 Rebalance status is not shown correctly after node reboot
                  • #1358 features/shard: wrong \"inode->ref\" leading to ASSERT in inode_unref
                  • #1359 Cleanup --disable-mempool
                  • #1380 fd_unref() optimization - do an atomic decrement outside the lock a
                  • #1384 mount glusterfs volume, files larger than 64Mb only show 64Mb
                  • #1406 shared storage volume fails to mount in ipv6 environment
                  • #1415 Removing problematic language in geo-replication
                  • #1423 shard_make_block_abspath() should be called with a string of of the
                  • #1536 Improve dict_reset() efficiency
                  • #1545 fuse_invalidate_entry() - too many repetitive calls to uuid_utoa()
                  • #1583 Rework stats structure (xl->stats.total.metrics[fop_idx] and friend
                  • #1584 MAINTAINERS file needs to be revisited and updated
                  • #1596 'this' NULL check relies on 'THIS' not being NULL
                  • #1600 Save and re-use MYUUID
                  • #1678 Improve gf_error_to_errno() and gf_errno_to_error() positive flow
                  • #1695 Rebalance has a redundant lookup operation
                  • #1702 Move GF_CLIENT_PID_GSYNCD check to start of the function.
                  • #1703 Remove trivial check for GF_XATTR_SHARD_FILE_SIZE before calling sh
                  • #1707 PL_LOCAL_GET_REQUESTS access the dictionary twice for the same info
                  • #1717 glusterd: sequence of rebalance and replace/reset-brick presents re
                  • #1723 DHT: further investigation for treating an ongoing mknod's linkto file
                  • #1749 brick-process: call 'notify()' and 'fini()' of brick xlators in a p
                  • #1755 Reduce calls to 'THIS' in fd_destroy() and others, where 'THIS' is
                  • #1761 CONTRIBUTING.md regression can only be run by maintainers
                  • #1764 Slow write on ZFS bricks after healing millions of files due to add
                  • #1772 build: add LTO as a configure option
                  • #1773 DHT/Rebalance - Remove unused variable dht_migrate_file
                  • #1779 Add-brick command should check hostnames with bricks present in vol
                  • #1825 Latency in io-stats should be in nanoseconds resolution, not micros
                  • #1872 Question: How to check heal info without glusterd management layer
                  • #1885 __posix_writev() - reduce memory copies and unneeded zeroing
                  • #1888 GD_OP_VERSION needs to be updated for release-10
                  • #1898 schedule_georep.py resulting in failure when used with python3
                  • #1909 core: Avoid several dict OR key is NULL message in brick logs
                  • #1925 dht_pt_getxattr does not seem to handle virtual xattrs.
                  • #1935 logging to syslog instead of any glusterfs logs
                  • #1943 glusterd-volgen: Add functionality to accept any custom xlator
                  • #1952 posix-aio: implement GF_FOP_FSYNC
                  • #1959 Broken links in the 2 replicas split-brain-issue - [Bug]Enhancemen
                  • #1960 Add missing LOCK_DESTROY() calls
                  • #1966 Can't print trace details due to memory allocation issues
                  • #1977 Inconsistent locking in presence of disconnects
                  • #1978 test case ./tests/bugs/core/bug-1432542-mpx-restart-crash.t is gett
                  • #1981 Reduce posix_fdstat() calls in IO paths
                  • #1991 mdcache: bug causes getxattr() to report ENODATA when fetching samb
                  • #1992 dht: var decommission_subvols_cnt becomes invalid when config is up
                  • #1996 Analyze if spinlocks have any benefit and remove them if not
                  • #2001 Error handling in /usr/sbin/gluster-eventsapi produces AttributeErr
                  • #2005 ./tests/bugs/replicate/bug-921231.t is continuously failing
                  • #2013 dict_t hash-calculation can be removed when hash_size=1
                  • #2024 Remove gfs_id variable or at least set to appropriate value
                  • #2025 list_del() should not set prev and next
                  • #2033 tests/bugs/nfs/bug-1053579.t fails on CentOS 8
                  • #2038 shard_unlink() fails due to no space to create marker file
                  • #2039 Do not allow POSIX IO backend switch when the volume is running
                  • #2042 mount ipv6 gluster volume with serveral backup-volfile-servers,use
                  • #2052 Revert the commit 50e953e2450b5183988c12e87bdfbc997e0ad8a8
                  • #2054 cleanup call_stub_t from unused variables
                  • #2063 Provide autoconf option to enable/disable storage.linux-io_uring du
                  • #2067 Change self-heal-window-size to 1MB by default
                  • #2075 Annotate synctasks with valgrind API if --enable-valgrind[=memcheck
                  • #2080 Glustereventsd default port
                  • #2083 GD_MSG_DICT_GET_FAILED should not include 'errno' but 'ret'
                  • #2086 Move tests/00-geo-rep/00-georep-verify-non-root-setup.t to tests/00
                  • #2096 iobuf_arena structure doesn't need passive and active iobufs, but l
                  • #2099 'force' option does not work in the replicated volume snapshot crea
                  • #2101 Move 00-georep-verify-non-root-setup.t back to tests/00-geo-rep/
                  • #2107 mount crashes when setfattr -n distribute.fix.layout -v \"yes\" is ex
                  • #2116 enable quota for multiple volumes take more time
                  • #2117 Concurrent quota enable causes glusterd deadlock
                  • #2123 Implement an I/O framework
                  • #2129 CID 1445996 Null pointer dereferences (FORWARD_NULL) /xlators/mgmt/
                  • #2130 stack.h/c: remove unused variable and reorder struct
                  • #2133 Changelog History Crawl failed after resuming stopped geo-replicati
                  • #2134 Fix spurious failures caused by change in profile info duration to
                  • #2138 glfs_write() dumps a core file file when buffer size is 1GB
                  • #2154 \"Operation not supported\" doing a chmod on a symlink
                  • #2159 Remove unused component tests
                  • #2161 Crash caused by memory corruption
                  • #2169 Stack overflow when parallel-readdir is enabled
                  • #2180 CID 1446716: Memory - illegal accesses (USE_AFTER_FREE) /xlators/mg
                  • #2187 [Input/output error] IO failure while performing shrink operation w
                  • #2190 Move a test case tests/basic/glusterd-restart-shd-mux.t to flaky
                  • #2192 4+1 arbiter setup is broken
                  • #2198 There are blocked inodelks for a long time
                  • #2216 Fix coverity issues
                  • #2232 \"Invalid argument\" when reading a directory with gfapi
                  • #2234 Segmentation fault in directory quota daemon for replicated volume
                  • #2239 rebalance crashes in dht on master
                  • #2241 Using readdir instead of readdirp for fix-layout increases performa
                  • #2253 Disable lookup-optimize by default in the virt group
                  • #2258 Provide option to disable fsync in data migration
                  • #2260 failed to list quota info after setting limit-usage
                  • #2268 dht_layout_unref() only uses 'this' to check that 'this->private' i
                  • #2278 nfs-ganesha does not start due to shared storage not ready, but ret
                  • #2287 runner infrastructure fails to provide platfrom independent error c
                  • #2294 dict.c: remove some strlen() calls if using DICT_LIST_IMP
                  • #2308 Developer sessions for glusterfs
                  • #2313 Long setting names mess up the columns and break parsing
                  • #2317 Rebalance doesn't migrate some sparse files
                  • #2328 \"gluster volume set group samba\" needs to include write-b
                  • #2330 gf_msg can cause relock deadlock
                  • #2334 posix_handle_soft() is doing an unnecessary stat
                  • #2337 memory leak observed in lock fop
                  • #2348 Gluster's test suite on RHEL 8 runs slower than on RHEL 7
                  • #2351 glusterd: After upgrade on release 9.1 glusterd protocol is broken
                  • #2353 Permission issue after upgrading to Gluster v9.1
                  • #2360 extras: postscript fails on logrotation of snapd logs
                  • #2364 After the service is restarted, a large number of handles are not r
                  • #2370 glusterd: Issues with custom xlator changes
                  • #2378 Remove sys_fstatat() from posix_handle_unset_gfid() function - not
                  • #2380 Remove sys_lstat() from posix_acl_xattr_set() - not needed
                  • #2388 Geo-replication gets delayed when there are many renames on primary
                  • #2394 Spurious failure in tests/basic/fencing/afr-lock-heal-basic.t
                  • #2398 Bitrot and scrub process showed like unknown in the gluster volume
                  • #2404 Spurious failure of tests/bugs/ec/bug-1236065.t
                  • #2407 configure glitch with CC=clang
                  • #2410 dict_xxx_sizen variant compilation should fail on passing a variabl
                  • #2414 Prefer mallinfo2() to mallinfo() if available
                  • #2421 rsync should not try to sync internal xattrs.
                  • #2429 Use file timestamps with nanosecond precision
                  • #2431 Drop --disable-syslog configuration option
                  • #2440 Geo-replication not working on Ubuntu 21.04
                  • #2443 Core dumps on Gluster 9 - 3 replicas
                  • #2446 client_add_lock_for_recovery() - new_client_lock() should be called
                  • #2467 failed to open /proc/0/status: No such file or directory
                  • #2470 sharding: [inode.c:1255:__inode_unlink] 0-inode: dentry not found
                  • #2480 Brick going offline on another host as well as the host which reboo
                  • #2502 xlator/features/locks/src/common.c has code duplication
                  • #2507 Use appropriate msgid in gf_msg()
                  • #2515 Unable to mount the gluster volume using fuse unless iptables is fl
                  • #2522 ganesha_ha (extras/ganesha/ocf): ganesha_grace RA fails in start()
                  • #2540 delay-gen doesn't work correctly for delays longer than 2 seconds
                  • #2551 Sometimes the lock notification feature doesn't work
                  • #2581 With strict-locks enabled clients which are holding posix locks sti
                  • #2590 trusted.io-stats-dump extended attribute usage description error
                  • #2611 Granular entry self-heal is taking more time than full entry self h
                  • #2617 High CPU utilization of thread glfs_fusenoti and huge delays in som
                  • #2620 Granular entry heal purging of index name trigger two lookups in th
                  • #2625 auth.allow value is corrupted after add-brick operation
                  • #2626 entry self-heal does xattrops unnecessarily in many cases
                  • #2649 glustershd failed in bind with error \"Address already in use\"
                  • #2652 Removal of deadcode: Pump
                  • #2659 tests/basic/afr/afr-anon-inode.t crashed
                  • #2664 Test suite produce uncompressed logs
                  • #2693 dht: dht_local_wipe is crashed while running rename operation
                  • #2771 Smallfile improvement in glusterfs
                  • #2782 Glustereventsd does not listen on IPv4 when IPv6 is not available
                  • #2789 An improper locking bug(e.g., deadlock) on the lock up_inode_ctx->c
                  • #2798 FUSE mount option for localtime-logging is not exposed
                  • #2816 Glusterfsd memory leak when subdir_mounting a volume
                  • #2835 dht: found anomalies in dht_layout after commit c4cbdbcb3d02fb56a62
                  • #2857 variable twice initialization.
                  • "},{"location":"release-notes/10.1/","title":"Release notes for Gluster 10.1","text":"

                    Release date: 1st-Feb-2022

                    This is a bugfix and improvement release. The release notes for 10.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 10 stable release.

                    NOTE:

                    • Next minor release tentative date: Week of 15th May, 2022 (as published in the Gluster Community Meeting, release 10 will receive updates every 3 months from now on)

                    • Users are highly encouraged to upgrade to newer releases of GlusterFS.

                    "},{"location":"release-notes/10.1/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                    • Fix missing stripe count issue with upgrade from 9.x to 10.x
                    • Fix IO failure when shrinking distributed dispersed volume with ongoing IO
                    • Fix log spam introduced with glusterfs 10.0
                    • Enable ltcmalloc_minimal instead of ltcmalloc
                    "},{"location":"release-notes/10.1/#builds-are-available-at-","title":"Builds are available at -","text":"

                    https://download.gluster.org/pub/gluster/glusterfs/10/10.1/

                    "},{"location":"release-notes/10.1/#bugs-addressed","title":"Bugs addressed","text":"
                    • #2846 Avoid redundant logs in gluster
                    • #2903 Fix worker disconnect due to AttributeError in geo-replication
                    • #2910 Check for available ports in port_range in glusterd
                    • #2939 Remove the deprecated commands from gluster man page
                    • #2947 Fix IO failure when shrinking distributed dispersed volume with ongoing IO
                    • #3071 Fix log spam introduced with glusterfs 10.0
                    • #3000 Enable ltcmalloc_minimal instead of ltcmalloc
                    • #3086 Handle excessive log in case dict is NUL
                    • #3133 Fix missing stripe count issue with upgrade from 9.x to 10.x
                    • #2962 Fix volume create failures without disperse count and ip addresses
                    "},{"location":"release-notes/10.2/","title":"Release notes for Gluster 10.2","text":"

                    This is a bugfix and improvement release. The release notes for 10.0 and 10.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 10 stable release.

                    NOTE:

                    • Next minor release tentative date: Week of 15th Nov, 2022
                    • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                    "},{"location":"release-notes/10.2/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                    • Optimize server functionality by enhancing server_process_event_upcall code path during the handling of upcall event
                    • Fix the issue of all bricks not starting on node reboot when the brick count is high (>750)
                    • Fix stale posix locks that appear after client disconnection
                    "},{"location":"release-notes/10.2/#builds-are-available-at","title":"Builds are available at","text":"

                    https://download.gluster.org/pub/gluster/glusterfs/10/10.2/

                    "},{"location":"release-notes/10.2/#bugs-addressed","title":"Bugs addressed","text":"
                    • #3182 Fix stale posix locks that appear after client disconnection
                    • #3187 Fix Locks xlator fd leaks
                    • #3234 Fix incorrect directory check in order to successfully locate the SSL certificate
                    • #3262 Synchronize layout(ref|unref) during layout(get|set) in dht
                    • #3321 Optimize server functionality by enhancing server_process_event_upcall code path during the handling of upcall event
                    • #3334 Fix errors and timeouts when creating qcow2 file via libgfapi
                    • #3375 Fix the issue of all bricks not starting on node reboot when the brick count is high (>750)
                    • #3417 Fix crash due to unaligned memory access
                    • #3470 Fix spurious crash when \"peer probing\" a non existing host name
                    "},{"location":"release-notes/10.3/","title":"Release notes for Gluster 10.3","text":"

                    Release date: 27th-Sep-2022

                    This is a bugfix release. The release notes for 10.0, 10.1 and 10.2 contain a listing of all the new improvements and bugs fixed in the GlusterFS 10 stable release.

                    NOTE:

                    • Next minor release tentative date: Week of 25th Mar, 2023
                    • Users are highly encouraged to upgrade to newer releases of GlusterFS.

                    "},{"location":"release-notes/10.3/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                    • Fix qemu-img crash on a distributed volume
                    • Fix a possible deadlock scenario in a brick process
                    • Allow opening the snapshot directory (entry point) via glfs_open()/glfs_h_open() to support Samba functionality
                    • Implement seek fop in DHT and prevent EBADFD related failures
                    "},{"location":"release-notes/10.3/#builds-are-available-at","title":"Builds are available at","text":"

                    https://download.gluster.org/pub/gluster/glusterfs/10/10.3/

                    "},{"location":"release-notes/10.3/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                    • #1000 Fix qemu-img crash on a distributed volume
                    • #3774 Fix a possible deadlock scenario in a brick process
                    • #3373 Implement seek fop in DHT and prevent EBADFD related failures
                    • #3666 Prevent snapd crashes on opening snapshot directory via gfapi
                    • #3765 Allow opening the snapshot directory (entry point) via glfs_open()/glfs_h_open()
                    • #3307 Fix return from glfs_open() to honour O_DIRECTORY flag for Samba
                    • #3725 Fix mismatch in errorcode between fgetxattr() and glusterfs.get_real_filename
                    • #3778 Handle spurious failures of spare_file_rebalance.t test case
                    "},{"location":"release-notes/10.4/","title":"Release notes for Gluster 10.4","text":"

                    Release date: 27th-Apr-2023

                    This is a bugfix release. The release notes for 10.0, 10.1, 10.2 and 10.3 contain a listing of all the new improvements and bugs fixed in the GlusterFS 10 stable release.

                    NOTE:

                    • Next minor release tentative date: Week of 25th Sep, 2023
                    • Users are highly encouraged to upgrade to newer releases of GlusterFS.

                    "},{"location":"release-notes/10.4/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                    • Fix fuse concurrency problems
                    • Fix memory corruption in debug builds
                    • Fix recovery issue with posix locks upon reconnection of a disconnected brick
                    "},{"location":"release-notes/10.4/#builds-are-available-at","title":"Builds are available at","text":"

                    https://download.gluster.org/pub/gluster/glusterfs/10/10.4/

                    "},{"location":"release-notes/10.4/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                    • #1000 configure: Force 'char' type to be signed in order to eliminate anomalies
                    • #2752 posix: Fix directory gfid handle if a rename fails
                    • #3345 Fix inconsistencies in big-endian architectures of hashfn
                    • #3346 Fix stack overflow when processing glx_dir(p) list structures in xdr
                    • #3882 Fix deadlock in gf_print_trace of sys_log
                    • #3901 Fix segmentation fault in io-stats xlator
                    • #3954 Fix stack-buffer-overflow according to AddressSanitizer
                    • #4020 Improve regression test suite
                    • #4029 Process stuck listing snapshots from NFS
                    • #4031 Fix Input/Output error when using linux-aio on big-endian architectures
                    • #4042 Fix recovery issue with posix locks upon reconnection of a disconnected brick
                    • #4071 Make timestamps of .snap directory stable
                    • #3894 Use opendir for directories in glfs_open and glfs_h_open of api
                    • #3636 Enable posix xlator to consider storage.reserve val
                    "},{"location":"release-notes/11.0/","title":"Release notes for Gluster 11.0","text":"

                    Release date: 14th-Feb-2023

                    This is a major release that includes a range of features, code improvements and stability fixes as noted below.

                    A selection of the key features and changes are documented in this page. A full list of bugs that have been addressed is included further below.

                    • Announcements
                    • Highlights
                    • Bugs addressed in the release
                    "},{"location":"release-notes/11.0/#announcements","title":"Announcements","text":"
                    1. The release that continues to receive maintenance updates after release 11 is release 10 (reference)
                    2. Releases 10 and 11 will receive maintenance updates around the 15th, every six months, relative to their respective release dates.
                    "},{"location":"release-notes/11.0/#highlights","title":"Highlights","text":"
                    • Major performance improvement of ~36% with rmdir operations #3685
                    • Extension of ZFS support for snapshots #2855
                    • Quota implementation based on namespace #1750
                    • Major cleanups and readdir/readdirp improvements link1 link2
                    "},{"location":"release-notes/11.0/#bugs-addressed","title":"Bugs addressed","text":"

                    Bugs addressed since release-10 are listed below:

                    • #1831 virtual images in replicated volume are not healed
                    • #1459 gluster_shared_storage failed to automount on node reboot on rhel 8
                    • #1458 sharding: fanout mknod process into multi threads
                    • #1457 systemd unit files missing from Debian 8.1-1 package
                    • #1456 virtual images in replicated volume are not healed?
                    • #1455 docs.gluster.org is down
                    • #1454 Geo-replication gsyncd at 100% CPU
                    • #1453 Disperse shd heal activity should be observable
                    • #1452 all glusterfs-client crashed at the same time
                    • #1451 Add details on ssl_setup_connection_params to help troubleshooting
                    • #1450 Please consider repackaging/providing dependency package (stubs)
                    • #145 Snapshot: improvements
                    • #1000 [bug:1193929] GlusterFS can be improved
                    • #1002 [bug:1679998] GlusterFS can be improved
                    • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                    • #1686 mount-shared-storage.sh issue with systemd-automount
                    • #1757 RFE: improve namespace support in glusterfs
                    • #1774 RFE: simple-quota implementation
                    • #2123 Implement an I/O framework
                    • #2164 gf_proc_dump_call_stack() is not printing ctime correctly
                    • #2308 Developer sessions for glusterfs
                    • #2469 Python syntax error in syncdutils.py
                    • #2483 Place holder issue for fixing potential bugs in protocol/client/src
                    • #2491 Add s390x support to community CI
                    • #2664 Test suite produce uncompressed logs
                    • #2717 GlusterFS doesn't support O_PATH flag in open()
                    • #2735 Remove byte-order.h and use the normal byteorder functions
                    • #2793 cluster.rebal-throttle description doesn't seem to match the code
                    • #2832 selinux: make it possible to persist ganesha_use_fusefs from one up
                    • #2846 glusterd log filled with error messages.
                    • #2903 geo-rep restarts because of 'list' object has no attribute 'join' e
                    • #2910 glusterd: volume start doesn't fail with properly if the port range
                    • #2912 When glusterfs uses an untrusted domain name，it cannot update the
                    • #2913 gluster lib cannot be dlopened: /lib64/libtcmalloc.so.4: cannot all
                    • #2916 Replace SHA deprecated functions with newer ones
                    • #2936 Wrong value for inodeSize in Volume status xml output.
                    • #2939 Volume log commands 'filename' and 'locate' described in the man bu
                    • #2944 tests: valid ip to be used instead of localhost, 127.0.0.1 or loopb
                    • #2947 IO failure when shrinking distributed dispersed volume while perfor
                    • #2962 cli: volume create without disperse count fails with ip addresses
                    • #2963 Do not use an iobuf pool for the CLI
                    • #2964 Cleanup the stub pool
                    • #2967 Make relevant functions static
                    • #2971 core file from /tests/basic/fencing/afr-lock-heal-advanced.t
                    • #2973 Allocate socket ioq outside of the out_lock
                    • #2975 quick-read: remove unused 'sh-failed' dict_get() function
                    • #2986 AFR: reduce variable indirection
                    • #2989 GF_ASSERT_AND_GOTO_WITH_ERROR improvements
                    • #2997 HA status is in FAILOVER when configuring NFS ganesha with pacemake
                    • #2998 Remove old authentication schemes
                    • #3004 Use gf_strndup() instead of gf_strdup() when string length is known
                    • #3005 multiple files: improve gf_dirent_for_name() functionality
                    • #3012 Reduce the impact of Cloudsync on IO path
                    • #3054 Update the links in gluster test framework documentation
                    • #3066 Online upgrade - 9.x to 10.0
                    • #3071 Log spam with glusterfs 10.0
                    • #3076 __lease_ctx_set() is not checking correctly for the return code
                    • #3103 glusterfs snapd crashes when snapshot is de-activated.
                    • #3130 Reduce the number of include statements
                    • #3137 Cleanup common include files
                    • #3166 bug: Incorrect Mac OS version assertion in configure.
                    • #3182 Some stale posix locks appear after a client disconnection
                    • #3187 Locks xlator leaks fd's in some cases
                    • #3191 dht: Fix double free issue in the cbk function dht_common_mark_mdsx
                    • #3194 Log message for RPC clients is misleading because it logs unsigned
                    • #3213 Configure geo rep SSH with AES128 encryption - to improve performance
                    • #3217 Disperse volume with more than 16 data bricks fails to mount
                    • #3228 event-epoll.c: table_idx will always be 0?
                    • #3229 handle 'option remote-port' properly in client protocol.
                    • #3232 The config for creating /var/run/gluster on boot is missing for glu
                    • #3234 SSL certificate wrong default path
                    • #3236 nfs: Optimize ctxcount value to reduce memory consumption for nfs-s
                    • #3240 dht_revalidate_cbk() needs to trigger directory heal with root perm
                    • #3248 fault in gluster command completion
                    • #3262 dht: Synchronize layout_(ref|unref) during layout_(get|set) in dht
                    • #3264 Posix private struct - reduce size
                    • #3288 GFID split brain resolution using favourite-child-policy leads to E
                    • #3294 Remove dht_nonblocking_inodelk() - it's not used
                    • #3304 CID 1476381: (OVERRUN) @ /rpc/rpc-lib/src/rpc-clnt.c: 249 in __save
                    • #3321 server: Optimize server_process_event_upcall code path during handl
                    • #3329 mounting with ipv6 hostname leads to failure
                    • #3334 Error messages and 20 seconds timeout when creating qcow2 file via
                    • #3347 Test Failure: tests/bitrot/bug-1207627-bitrot-scrub-status.t
                    • #3359 meta_lookup() and other small improvements to meta xlator
                    • #3373 DHT doesn't implement seek fop and causes failures
                    • #3375 glusterd: After node reboot not able to start all bricks successful
                    • #3382 Dictionary: remove all hash related code
                    • #3394 autoupdate - Update a configure.ac to a newer Autoconf
                    • #3417 Crash due to unaligned memory access
                    • #3426 logging.c small improvements
                    • #3469 Improve regression testing
                    • #3470 Spurious crash when \"peer probing\" a non existing host name
                    • #3507 thin-arbiter-volume.t execute failed
                    • #3521 changelog: A brick process is getting crash due to SIGSEGV
                    • #3527 Brick process crashed when global thread pool is enabled
                    • #3604 is_nfs_export_available check and mount_nfs commands fail in RHEL 8
                    • #3636 posix: small storage environment and storage.reserve value
                    • #3647 wrong debug log in upcall_cache_invalidate()
                    • #3662 Some times Gluster Volume info XML returns wrong distCount
                    • #3683 dht: Cleanup linkto file by rebalance daemon while (hashed|cached)
                    • #3685 dht: Introduce rmdir-optimize option
                    • #3688 Infinite loop in dht when lookup fails with ENODATA
                    • #3695 test: ./tests/bugs/posix/bug-1651445.t is continuous failing during
                    • #3708 ./tests/basic/afr/afr-no-fsync.t is failing on FSYNCDIR, not on FSY
                    • #3710 Brick crashes automatically when writing small files
                    • #3717 syncop functions: no need to copy iatt structs if you are not going
                    • #3729 gf_svc_releasedir() logic error
                    • #3750 Bash completion is loaded every time bash starts
                    • #3774 Dead Lock In Brick Process
                    • #3778 test: Increase timeout for ./tests/basic/distribute/spare_file_reba
                    • #3781 Prime time client-only access control
                    • #3793 tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum
                    • #3797 cdc xlator improvements
                    • #3823 rfc.sh: cannot detect upstream remote for non english locale
                    • #3831 afr: posix lock behavior is not correct while received an interrupt
                    • #3836 tests/bugs/glusterd/daemon-log-level-option.t fails on regression
                    • #3845 fuse_readdirp_cbk() - minor modifications
                    • #3847 gftest build failure
                    • #3855 reduce work for memory account
                    • #3876 fsetxattr() failed with EBADFD on opened directories
                    • #3891 Possible trash can size overflow
                    "},{"location":"release-notes/11.1/","title":"Release notes for Gluster 11.1","text":"

                    This is a bugfix release. The release notes for 11.0 contain a listing of all the new improvements and bugs fixed in the GlusterFS 11 stable release.

                    NOTE:

                    • Next minor release tentative date: release will be based on requirement only
                    • Users are highly encouraged to upgrade to newer releases of GlusterFS.

                    "},{"location":"release-notes/11.1/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                    • Fix upgrade issue by reverting posix change related to storage.reserve value
                    • Fix possible data loss during rebalance if there is any linkfile on the system
                    • Fix maximum op-version for release 11
                    "},{"location":"release-notes/11.1/#builds-are-available-at","title":"Builds are available at","text":"

                    https://download.gluster.org/pub/gluster/glusterfs/11/11.1/

                    "},{"location":"release-notes/11.1/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                    • #1000 Force 'char' type to be signed and add '-fsigned-char' option during compilation
                    • #2752 Fix directory gfid handle if a rename fails
                    • #3346 Fix stack overflow when processing glx_dir(p)list structures in xdr
                    • #3636 Fix upgrade issue by reverting posix change related to storage.reserve value
                    • #3701 Fix error \"not supported for ipv6\"
                    • #3732 Fix an AddressSanitizer issue heap-use-after-free
                    • #4005 Fix maximum op-version for release 11
                    • #4020 Improve regression test suite
                    • #4029 Process stuck listing snapshots from NFS
                    • #4031 Fix write failures with \"I/O error\" when using linux-aio on big-endian architectures
                    • #4042 Recover posix locks upon reconnection of a disconnected brick
                    • #4071 Make timestamps stable in snapview-server
                    • #4074 Fix fuse Crashing with \"Assertion failed: inode_lookup >= nlookup
                    • #4107 Fix the issue of cli not showing the correct volume type
                    • #4148 Fix possible data loss during rebalance if there is any linkfile on the system
                    • #4190 Fix the issue of glusterfs encountering a SIGSEGV in __gf_free
                    • #4198 Fix warnings raised by glusterfs fuse script on fedora38
                    • #4224 Add simple-quota xattr to afr and ec ignore list
                    • #4196 Revert structure of per_thread_pool_list_t
                    • #4255 Fix the brick process crash during the upcall event
                    "},{"location":"release-notes/3.10.0/","title":"Release notes for Gluster 3.10.0","text":"

                    This is a major Gluster release that includes some substantial changes. The features revolve around better support in container environments, scaling to a larger number of bricks per node, and a few usability and performance improvements, among other bug fixes.

                    The most notable features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

                    "},{"location":"release-notes/3.10.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/3.10.0/#brick-multiplexing","title":"Brick multiplexing","text":"

                    Notes for users: Multiplexing reduces both port and memory usage. It does not improve performance vs. non-multiplexing except when memory is the limiting factor, though there are other related changes that improve performance overall (e.g. compared to 3.9).

                    Multiplexing is off by default. It can be enabled with

                    # gluster volume set all cluster.brick-multiplex on\n
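
                    As a quick sanity check (a sketch, not from the original notes), the current value of the global option can be read back; gluster volume get with the special volume name all reports cluster-wide options:

                    # gluster volume get all cluster.brick-multiplex\n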

                    Limitations: There are currently no tuning options for multiplexing - it's all or nothing. This will change in the near future.

                    Known Issues: The only feature or combination of features known not to work with multiplexing is USS and SSL. Anyone using that combination should leave multiplexing off.

                    "},{"location":"release-notes/3.10.0/#support-to-display-op-version-information-from-clients","title":"Support to display op-version information from clients","text":"

                    Notes for users: To get information on which op-versions are supported by the clients, users can invoke the gluster volume status command for clients. Along with information on hostname, port, bytes read, bytes written and number of clients connected per brick, we now also get the op-version on which the respective clients operate. Following is the example usage:

                    # gluster volume status <VOLNAME|all> clients\n

                    Limitations:

                    Known Issues:

                    "},{"location":"release-notes/3.10.0/#support-to-get-maximum-op-version-in-a-heterogeneous-cluster","title":"Support to get maximum op-version in a heterogeneous cluster","text":"

                    Notes for users: A heterogeneous cluster operates on a common op-version that can be supported across all the nodes in the trusted storage pool. Upon upgrade of the nodes in the cluster, the cluster might support a higher op-version. Users can retrieve the maximum op-version to which the cluster could be bumped by invoking the gluster volume get command on the newly introduced global option, cluster.max-op-version. The usage is as follows:

                    # gluster volume get all cluster.max-op-version\n
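
                    Once every node in the pool runs the upgraded bits, the cluster op-version can then be bumped to the value reported above (illustrative sketch; substitute the number returned on your cluster):

                    # gluster volume set all cluster.op-version <OP-VERSION>\n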

                    Limitations:

                    Known Issues:

                    "},{"location":"release-notes/3.10.0/#support-for-rebalance-time-to-completion-estimation","title":"Support for rebalance time to completion estimation","text":"

                    Notes for users: Users can now see approximately how much time the rebalance operation will take to complete across all nodes.

                    The estimated time left for rebalance to complete is displayed as part of the rebalance status. Use the command:

                    # gluster volume rebalance <VOLNAME> status\n

                    Limitations: The rebalance process calculates the time left based on the rate at which files are processed on the node and the total number of files on the brick, which is determined using statfs. The limitations of this are:

                    • A single fs partition must host only one brick. Multiple bricks on the same fs partition will cause the statfs results to be invalid.

                    • The estimates are dynamic and are recalculated every time the rebalance status command is invoked. The estimates become more accurate over time, so short-running rebalance operations may not benefit.

                    Known Issues: As glusterfs does not store the number of files on the brick, statfs is used to estimate the number. The .glusterfs directory contents can significantly skew this number and affect the calculated estimates.

                    "},{"location":"release-notes/3.10.0/#separation-of-tier-as-its-own-service","title":"Separation of tier as its own service","text":"

                    Notes for users: This change moves the management of the tier daemon into the gluster service framework, thereby improving its stability and manageability through the service framework.

                    This introduces no change to any of the tier commands or user-facing interfaces and operations.

                    Limitations:

                    Known Issues:

                    "},{"location":"release-notes/3.10.0/#statedump-support-for-gfapi-based-applications","title":"Statedump support for gfapi based applications","text":"

                    Notes for users: gfapi-based applications can now dump state information for better troubleshooting of issues. A statedump can be triggered in two ways:

                    1. by executing the following on one of the Gluster servers,
                       # gluster volume statedump <VOLNAME> client <HOST>:<PID>\n
                    • <VOLNAME> should be replaced by the name of the volume
                    • <HOST> should be replaced by the hostname of the system running the gfapi application
                    • <PID> should be replaced by the PID of the gfapi application

                    2. through calling glfs_sysrq(<FS>, GLFS_SYSRQ_STATEDUMP) within the application

                    • <FS> should be replaced by a pointer to a glfs_t structure

                    All statedumps (*.dump.* files) will be located at the usual location; on most distributions this would be /var/run/gluster/.
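
                    A hedged end-to-end example, where the volume name, client hostname and PID are placeholders: trigger the dump from any Gluster server for the gfapi application, then inspect the generated files:

                    # gluster volume statedump myvol client app01.example.com:4312\n# ls /var/run/gluster/*.dump.*\n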

                    Limitations: It is not possible to trigger statedumps from the Gluster CLI when the gfapi application has lost its management connection to the GlusterD servers.

                    GlusterFS 3.10 is the first release that contains support for the new glfs_sysrq() function. Applications that include features for debugging will need to be adapted to call this function. At the time of the release of 3.10, no applications are known to call glfs_sysrq().

                    Known Issues:

                    "},{"location":"release-notes/3.10.0/#disabled-creation-of-trash-directory-by-default","title":"Disabled creation of trash directory by default","text":"

                    Notes for users: From now onwards the trash directory, namely .trashcan, will not be created by default when new volumes are created, unless the feature is turned ON, and the restrictions on it remain applicable as long as features.trash is set for a particular volume.

                    Limitations: After an upgrade, the trash directory will still be present at the root of pre-existing volumes. Those who are not interested in this feature may have to manually delete the directory from the mount point, as illustrated below.
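
                    For illustration (volume name and mount point are hypothetical): the feature can be turned on per volume, or, if it is to stay disabled, the leftover directory can simply be removed from the mount point:

                    # gluster volume set myvol features.trash on\n# rm -rf /mnt/myvol/.trashcan   # only if the trash feature remains disabled\n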

                    Known Issues:

                    "},{"location":"release-notes/3.10.0/#implemented-parallel-readdirp-with-distribute-xlator","title":"Implemented parallel readdirp with distribute xlator","text":"

                    Notes for users: Currently the directory listing gets slower as the number of bricks/nodes increases in a volume, though the file/directory numbers remain unchanged. With this feature, the performance of directory listing is made mostly independent of the number of nodes/bricks in the volume. Thus scale doesn't exponentially reduce the directory listing performance. (On a 2, 5, 10, 25 brick setup we saw ~5, 100, 400, 450% improvement respectively)

                    To enable this feature:

                    # gluster volume set <VOLNAME> performance.readdir-ahead on\n# gluster volume set <VOLNAME> performance.parallel-readdir on\n

                    To disable this feature:

                    # gluster volume set <VOLNAME> performance.parallel-readdir off\n

                    If there are more than 50 bricks in the volume, it is good to increase the cache size beyond the default value of 10MB:

                    # gluster volume set <VOLNAME> performance.rda-cache-limit <CACHE SIZE>\n
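
                    For example, to raise the cache limit on a hypothetical volume named myvol to an illustrative 100MB:

                    # gluster volume set myvol performance.rda-cache-limit 100MB\n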

                    Limitations:

                    Known Issues:

                    "},{"location":"release-notes/3.10.0/#md-cache-can-optionally-ve-cache-securityima-xattr","title":"md-cache can optionally -ve cache security.ima xattr","text":"

                    Notes for users: From kernel version 3.X or greater, creating a file results in a removexattr call on the security.ima xattr. This xattr is not set on the file unless the IMA feature is active. With this patch, the removexattr call returns ENODATA if the xattr is not found in the cache.

                    The end benefit is faster create operations where IMA is not enabled.

                    To cache this xattr, use:

                    # gluster volume set <VOLNAME> performance.cache-ima-xattrs on\n

                    The above option is on by default.

                    Limitations:

                    Known Issues:

                    "},{"location":"release-notes/3.10.0/#added-support-for-cpu-extensions-in-disperse-computations","title":"Added support for CPU extensions in disperse computations","text":"

                    Notes for users: To improve disperse computations, a new way of generating dynamic code targeting specific CPU extensions like SSE and AVX on Intel processors is implemented. The available extensions are detected at run time. This can roughly double encoding and decoding speeds (or halve CPU usage).

                    This change is 100% compatible with the old method. No change is needed if an existing volume is upgraded.

                    You can control which extensions to use or disable them with the following command:

                    # gluster volume set <VOLNAME> disperse.cpu-extensions <type>\n

                    Valid values are:

                    • none: Completely disable dynamic code generation
                    • auto: Automatically detect available extensions and use the best one
                    • x64: Use dynamic code generation using standard 64 bits instructions
                    • sse: Use dynamic code generation using SSE extensions (128 bits)
                    • avx: Use dynamic code generation using AVX extensions (256 bits)

                    The default value is 'auto'. If a value is specified that is not detected at run time, it will automatically fall back to the next available option.
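
                    As an illustration with a hypothetical volume name, the extension can be pinned explicitly and the effective value read back:

                    # gluster volume set myvol disperse.cpu-extensions avx\n# gluster volume get myvol disperse.cpu-extensions\n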

                    Limitations:

                    Known Issues: To solve a conflict between the dynamic code generator and SELinux, it has been necessary to create a dynamic file at runtime in the directory /usr/libexec/glusterfs. This directory only exists if the server package is installed. On nodes with only the client package installed, this directory won't exist and the dynamic code won't be used.

                    It also needs root privileges to create the file there, so any gfapi application not running as root won't be able to use dynamic code generation.

                    In these cases, disperse volumes will continue working normally but using the old implementation (equivalent to setting disperse.cpu-extensions to none).

                    More information and a discussion on how to solve this can be found here:

                    https://bugzilla.redhat.com/1421649

                    "},{"location":"release-notes/3.10.0/#bugs-addressed","title":"Bugs addressed","text":"

                    Bugs addressed since release-3.9 are listed below.

                    • #789278: Issues reported by Coverity static analysis tool
                    • #1198849: Minor improvements and cleanup for the build system
                    • #1211863: RFE: Support in md-cache to use upcall notifications to invalidate its cache
                    • #1231224: Misleading error messages on brick logs while creating directory (mkdir) on fuse mount
                    • #1234054: `gluster volume heal split-brain' does not heal if data/metadata/entry self-heal options are turned off
                    • #1289922: Implement SIMD support on EC
                    • #1290304: [RFE]Reducing number of network round trips
                    • #1297182: Mounting with \"-o noatime\" or \"-o noexec\" causes \"nosuid,nodev\" to be set as well
                    • #1313838: Tiering as separate process and in v status moving tier task to tier process
                    • #1316873: EC: Set/unset dirty flag for all the update operations
                    • #1325531: Statedump: Add per xlator ref counting for inode
                    • #1325792: \"gluster vol heal test statistics heal-count replica\" seems doesn't work
                    • #1330604: out-of-tree builds generate XDR headers and source files in the original directory
                    • #1336371: Sequential volume start&stop is failing with SSL enabled setup.
                    • #1341948: DHT: Rebalance- Misleading log messages from __dht_check_free_space function
                    • #1344714: removal of file from nfs mount crashs ganesha server
                    • #1349385: [FEAT]jbr: Add rollbacking of failed fops
                    • #1355956: RFE : move ganesha related configuration into shared storage
                    • #1356076: DHT doesn't evenly balance files on FreeBSD with ZFS
                    • #1356960: OOM Kill on client when heal is in progress on 1*(2+1) arbiter volume
                    • #1357753: JSON output for all Events CLI commands
                    • #1357754: Delayed Events if any one Webhook is slow
                    • #1358296: tier: breaking down the monolith processing function tier_migrate_using_query_file()
                    • #1359612: [RFE] Geo-replication Logging Improvements
                    • #1360670: Add output option --xml to man page of gluster
                    • #1363595: Node remains in stopped state in pcs status with \"/usr/lib/ocf/resource.d/heartbeat/ganesha_mon: line 137: [: too many arguments ]\" messages in logs.
                    • #1363965: geo-replication *changes.log does not respect the log-level configured
                    • #1364420: [RFE] History Crawl performance improvement
                    • #1365395: Support for rc.d and init for Service management
                    • #1365740: dht: Update stbuf from servers having layout
                    • #1365791: Geo-rep worker Faulty with OSError: [Errno 21] Is a directory
                    • #1365822: [RFE] cli command to get max supported cluster.op-version
                    • #1366494: Rebalance is not considering the brick sizes while fixing the layout
                    • #1366495: 1 mkdir generates tons of log messages from dht xlator
                    • #1366648: [GSS] A hot tier brick becomes full, causing the entire volume to have issues and returns stale file handle and input/output error.
                    • #1366815: spurious heal info as pending heal entries never end on an EC volume while IOs are going on
                    • #1368012: gluster fails to propagate permissions on the root of a gluster export when adding bricks
                    • #1368138: Crash of glusterd when using long username with geo-replication
                    • #1368312: Value of `replica.split-brain-status' attribute of a directory in metadata split-brain in a dist-rep volume reads that it is not in split-brain
                    • #1368336: [RFE] Tier Events
                    • #1369077: The directories get renamed when data bricks are offline in 4*(2+1) volume
                    • #1369124: fix unused variable warnings from out-of-tree builds generate XDR headers and source files i...
                    • #1369397: segment fault in changelog_cleanup_dispatchers
                    • #1369403: [RFE]: events from protocol server
                    • #1369523: worm: variable reten_mode is invalid to be free by mem_put in fini()
                    • #1370410: [granular entry sh] - Provide a CLI to enable/disable the feature that checks that there are no heals pending before allowing the operation
                    • #1370567: [RFE] Provide snapshot events for the new eventing framework
                    • #1370931: glfs_realpath() should not return malloc()'d allocated memory
                    • #1371353: posix: Integrate important events with events framework
                    • #1371470: disperse: Integrate important events with events framework
                    • #1371485: [RFE]: AFR events
                    • #1371539: Quota version not changing in the quota.conf after upgrading to 3.7.1 from 3.6.1
                    • #1371540: Spurious regression in tests/basic/gfapi/bug1291259.t
                    • #1371874: [RFE] DHT Events
                    • #1372193: [geo-rep]: AttributeError: 'Popen' object has no attribute 'elines'
                    • #1372211: write-behind: flush stuck by former failed write
                    • #1372356: glusterd experiencing repeated connect/disconnect messages when shd is down
                    • #1372553: \"gluster vol status all clients --xml\" doesn't generate xml if there is a failure in between
                    • #1372584: Fix the test case http://review.gluster.org/#/c/15385/
                    • #1373072: Event pushed even if Answer is No in the Volume Stop and Delete prompt
                    • #1373373: Worker crashes with EINVAL errors
                    • #1373520: [Bitrot]: Recovery fails of a corrupted hardlink (and the corresponding parent file) in a disperse volume
                    • #1373741: [geo-replication]: geo-rep Status is not showing bricks from one of the nodes
                    • #1374093: glusterfs: create a directory with 0464 mode return EIO error
                    • #1374286: [geo-rep]: defunct tar process while using tar+ssh sync
                    • #1374584: Detach tier commit is allowed when detach tier start goes into failed state
                    • #1374587: gf_event python fails with ImportError
                    • #1374993: bug-963541.t spurious failure
                    • #1375181: /var/tmp/rpm-tmp.KPCugR: line 2: /bin/systemctl: No such file or directory
                    • #1375431: [RFE] enable sharding and strict-o-direct with virt profile - /var/lib/glusterd/groups/virt
                    • #1375526: Kill rpc.statd on Linux machines
                    • #1375532: Rpm installation fails with conflicts error for eventsconfig.json file
                    • #1376671: Rebalance fails to start if a brick is down
                    • #1376693: RFE: Provide a prompt when enabling gluster-NFS
                    • #1377097: The GlusterFS Callback RPC-calls always use RPC/XID 42
                    • #1377341: out-of-tree builds generate XDR headers and source files in the original directory
                    • #1377427: incorrect fuse dumping for WRITE
                    • #1377556: Files not being opened with o_direct flag during random read operation (Glusterfs 3.8.2)
                    • #1377584: memory leak problems are found in daemon:glusterd, server:glusterfsd and client:glusterfs
                    • #1377607: Volume restart couldn't re-export the volume exported via ganesha.
                    • #1377864: Creation of files on hot tier volume taking very long time
                    • #1378057: glusterd fails to start without installing glusterfs-events package
                    • #1378072: Modifications to AFR Events
                    • #1378305: DHT: remove unused structure members
                    • #1378436: build: python-ctypes no longer exists in Fedora Rawhide
                    • #1378492: warning messages seen in glusterd logs for each 'gluster volume status' command
                    • #1378684: Poor smallfile read performance on Arbiter volume compared to Replica 3 volume
                    • #1378778: Add a test script for compound fops changes in AFR
                    • #1378842: [RFE] 'gluster volume get' should implement the way to retrieve volume options using the volume name 'all'
                    • #1379223: \"nfs.disable: on\" is not showing in Vol info by default for the 3.7.x volumes after updating to 3.9.0
                    • #1379285: gfapi: Fix fd ref leaks
                    • #1379328: Boolean attributes are published as string
                    • #1379330: eventsapi/georep: Events are not available for Checkpoint and Status Change
                    • #1379511: Fix spurious failures in open-behind.t
                    • #1379655: Recording (ffmpeg) processes on FUSE get hung
                    • #1379720: errors appear in brick and nfs logs and getting stale files on NFS clients
                    • #1379769: GlusterFS fails to build on old Linux distros with linux/oom.h missing
                    • #1380249: Huge memory usage of FUSE client
                    • #1380275: client ID should logged when SSL connection fails
                    • #1381115: Polling failure errors getting when volume is started&stopped with SSL enabled setup.
                    • #1381421: afr fix shd log message error
                    • #1381830: Regression caused by enabling client-io-threads by default
                    • #1382236: glusterfind pre session hangs indefinitely
                    • #1382258: RFE: Support to update NFS-Ganesha export options dynamically
                    • #1382266: md-cache: Invalidate cache entry in case of OPEN with O_TRUNC
                    • #1384142: crypt: changes needed for openssl-1.1 (coming in Fedora 26)
                    • #1384297: glusterfs can't self heal character dev file for invalid dev_t parameters
                    • #1384906: arbiter volume write performance is bad with sharding
                    • #1385104: invalid argument warning messages seen in fuse client logs 2016-09-30 06:34:58.938667] W [dict.c:418ict_set] (-->/usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x58722) 0-dict: !this || !value for key=link-count [Invalid argument]
                    • #1385575: pmap_signin event fails to update brickinfo->signed_in flag
                    • #1385593: Fix some spelling mistakes in comments and log messages
                    • #1385839: Incorrect volume type in the \"glusterd_state\" file generated using CLI \"gluster get-state\"
                    • #1386088: Memory Leaks in snapshot code path
                    • #1386097: 4 of 8 bricks (2 dht subvols) crashed on systemic setup
                    • #1386123: geo-replica slave node goes faulty for non-root user session due to fail to locate gluster binary
                    • #1386141: Error and warning message getting while removing glusterfs-events package
                    • #1386188: Asynchronous Unsplit-brain still causes Input/Output Error on system calls
                    • #1386200: Log all published events
                    • #1386247: [Eventing]: 'gluster volume tier start force' does not generate a TIER_START event
                    • #1386450: Continuous warning messages getting when one of the cluster node is down on SSL setup.
                    • #1386516: [Eventing]: UUID is showing zeros in the event message for the peer probe operation.
                    • #1386626: fuse mount point not accessible
                    • #1386766: trashcan max file limit cannot go beyond 1GB
                    • #1387160: clone creation with older names in a system fails
                    • #1387207: [Eventing]: Random VOLUME_SET events seen when no operation is done on the gluster cluster
                    • #1387241: Pass proper permission to acl_permit() in posix_acl_open()
                    • #1387652: [Eventing]: BRICK_DISCONNECTED events seen when a tier volume is stopped
                    • #1387864: [Eventing]: 'gluster vol bitrot scrub ondemand' does not produce an event
                    • #1388010: [Eventing]: 'VOLUME_REBALANCE' event messages have an incorrect volume name
                    • #1388062: throw warning to show that older tier commands are depricated and will be removed.
                    • #1388292: performance.read-ahead on results in processes on client stuck in IO wait
                    • #1388348: glusterd: Display proper error message and fail the command if S32gluster_enable_shared_storage.sh hook script is not present during gluster volume set all cluster.enable-shared-storage command
                    • #1388401: Labelled geo-rep checkpoints hide geo-replication status
                    • #1388861: build: python on Debian-based dists use .../lib/python2.7/dist-packages instead of .../site-packages
                    • #1388862: [Eventing]: Events not seen when command is triggered from one of the peer nodes
                    • #1388877: Continuous errors getting in the mount log when the volume mount server glusterd is down.
                    • #1389293: build: incorrect Requires: for portblock resource agent
                    • #1389481: glusterfind fails to list files from tiered volume
                    • #1389697: Remove-brick status output is showing status of fix-layout instead of original remove-brick status output
                    • #1389746: Refresh config fails while exporting subdirectories within a volume
                    • #1390050: Elasticsearch get CorruptIndexException errors when running with GlusterFS persistent storage
                    • #1391086: gfapi clients crash while using async calls due to double fd_unref
                    • #1391387: The FUSE client log is filling up with posix_acl_default and posix_acl_access messages
                    • #1392167: SMB[md-cache Private Build]:Error messages in brick logs related to upcall_cache_invalidate gf_uuid_is_null
                    • #1392445: Hosted Engine VM paused post replace-brick operation
                    • #1392713: inconsistent file permissions b/w write permission and sticky bits(---------T ) displayed when IOs are going on with md-cache enabled (and within the invalidation cycle)
                    • #1392772: [setxattr_cbk] \"Permission denied\" warning messages are seen in logs while running pjd-fstest suite
                    • #1392865: Better logging when reporting failures of the kind \" Failing MKNOD as quorum is not met\"
                    • #1393259: stat of file is hung with possible deadlock
                    • #1393678: Worker restarts on log-rsync-performance config update
                    • #1394131: [md-cache]: All bricks crashed while performing symlink and rename from client at the same time
                    • #1394224: \"nfs-grace-monitor\" timed out messages observed
                    • #1394548: Make debugging EACCES errors easier to debug
                    • #1394719: libgfapi core dumps
                    • #1394881: Failed to enable nfs-ganesha after disabling nfs-ganesha cluster
                     • #1395261: Seeing error messages [snapview-client.c:283:gf_svc_lookup_cbk] and [dht-helper.c:1666:dht_inode_ctx_time_update] (-->/usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x5d75c)
                    • #1395648: ganesha-ha.conf --status should validate if the VIPs are assigned to right nodes
                    • #1395660: Checkpoint completed event missing master node detail
                    • #1395687: Client side IObuff leaks at a high pace consumes complete client memory and hence making gluster volume inaccessible
                    • #1395993: heal info --xml when bricks are down in a systemic environment is not displaying anything even after more than 30minutes
                    • #1396038: refresh-config fails and crashes ganesha when mdcache is enabled on the volume.
                    • #1396048: A hard link is lost during rebalance+lookup
                    • #1396062: [geo-rep]: Worker crashes seen while renaming directories in loop
                    • #1396081: Wrong value in Last Synced column during Hybrid Crawl
                    • #1396364: Scheduler : Scheduler should not depend on glusterfs-events package
                    • #1396793: [Ganesha] : Ganesha crashes intermittently during nfs-ganesha restarts.
                    • #1396807: capture volume tunables in get-state dump
                    • #1396952: I/O errors on FUSE mount point when reading and writing from 2 clients
                    • #1397052: OOM kill of nfs-ganesha on one node while fs-sanity test suite is executed.
                    • #1397177: memory leak when using libgfapi
                    • #1397419: glusterfs_ctx_defaults_init is re-initializing ctx->locks
                    • #1397424: PEER_REJECT, EVENT_BRICKPATH_RESOLVE_FAILED, EVENT_COMPARE_FRIEND_VOLUME_FAILED are not seen
                    • #1397754: [SAMBA-CIFS] : IO hungs in cifs mount while graph switch on & off
                    • #1397795: NFS-Ganesha:Volume reset for any option causes reset of ganesha enable option and bring down the ganesha services
                    • #1398076: SEEK_HOLE/ SEEK_DATA doesn't return the correct offset
                    • #1398226: With compound fops on, client process crashes when a replica is brought down while IO is in progress
                    • #1398566: self-heal info command hangs after triggering self-heal
                    • #1399031: build: add systemd dependency to glusterfs sub-package
                    • #1399072: [Disperse] healing should not start if only data bricks are UP
                    • #1399134: GlusterFS client crashes during remove-brick operation
                    • #1399154: After ganesha node reboot/shutdown, portblock process goes to FAILED state
                    • #1399186: [GANESHA] Export ID changed during volume start and stop with message \"lookup_export failed with Export id not found\" in ganesha.log
                    • #1399578: [compound FOPs]: Memory leak while doing FOPs with brick down
                    • #1399592: Memory leak when self healing daemon queue is full
                    • #1399780: Use standard refcounting for structures where possible
                    • #1399995: Dump volume specific options in get-state output in a more parseable manner
                    • #1400013: [USS,SSL] .snaps directory is not reachable when I/O encryption (SSL) is enabled
                    • #1400026: Duplicate value assigned to GD_MSG_DAEMON_STATE_REQ_RCVD and GD_MSG_BRICK_CLEANUP_SUCCESS messages
                    • #1400237: Ganesha services are not stopped when pacemaker quorum is lost
                    • #1400613: [GANESHA] failed to create directory of hostname of new node in var/lib/nfs/ganesha/ in already existing cluster nodes
                    • #1400818: possible memory leak on client when writing to a file while another client issues a truncate
                    • #1401095: log the error when locking the brick directory fails
                    • #1401218: Fix compound fops memory leaks
                    • #1401404: [Arbiter] IO's Halted and heal info command hung
                    • #1401777: atime becomes zero when truncating file via ganesha (or gluster-NFS)
                    • #1401801: [RFE] Use Host UUID to find local nodes to spawn workers
                    • #1401812: RFE: Make readdirp parallel in dht
                    • #1401822: [GANESHA]Unable to export the ganesha volume after doing volume start and stop
                    • #1401836: update documentation to readthedocs.io
                    • #1401921: glusterfsd crashed while taking snapshot using scheduler
                    • #1402237: Bad spacing in error message in cli
                    • #1402261: cli: compile warnings (unused var) if building without bd xlator
                    • #1402369: Getting the warning message while erasing the gluster \"glusterfs-server\" package.
                    • #1402710: ls and move hung on disperse volume
                    • #1402730: self-heal not happening, as self-heal info lists the same pending shards to be healed
                    • #1402828: Snapshot: Snapshot create command fails when gluster-shared-storage volume is stopped
                    • #1402841: Files remain unhealed forever if shd is disabled and re-enabled while healing is in progress.
                    • #1403130: [GANESHA] Adding a node to cluster failed to allocate resource-agents to new node.
                    • #1403780: Incorrect incrementation of volinfo refcnt during volume start
                    • #1404118: Snapshot: After snapshot restore failure , snapshot goes into inconsistent state
                    • #1404168: Upcall: Possible use after free when log level set to TRACE
                    • #1404181: [Ganesha+SSL] : Ganesha crashes on all nodes on volume restarts
                    • #1404410: [Perf] : pcs cluster resources went into stopped state during Multithreaded perf tests on RHGS layered over RHEL 6
                    • #1404573: tests/bugs/snapshot/bug-1316437.t test is causing spurious failure
                    • #1404678: [geo-rep]: Config commands fail when the status is 'Created'
                    • #1404905: DHT : file rename operation is successful but log has error 'key:trusted.glusterfs.dht.linkto error:File exists' , 'setting xattrs on failed (File exists)'
                    • #1405165: Allow user to disable mem-pool
                    • #1405301: Fix the failure in tests/basic/gfapi/bug1291259.t
                    • #1405478: Keepalive should be set for IPv6 & IPv4
                    • #1405554: Fix spurious failure in bug-1402841.t-mt-dir-scan-race.t
                    • #1405775: GlusterFS process crashed after add-brick
                    • #1405902: Fix spurious failure in tests/bugs/replicate/bug-1402730.t
                    • #1406224: VM pauses due to storage I/O error, when one of the data brick is down with arbiter/replica volume
                    • #1406249: [GANESHA] Deleting a node from ganesha cluster deletes the volume entry from /etc/ganesha/ganesha.conf file
                    • #1406252: Free xdr-allocated compound request and response arrays
                    • #1406348: [Eventing]: POSIX_SAME_GFID event seen for .trashcan folder and .trashcan/internal_op
                    • #1406410: [GANESHA] Adding node to ganesha cluster is not assigning the correct VIP to the new node
                    • #1406411: Fail add-brick command if replica count changes
                    • #1406878: ec prove tests fail in FB build environment.
                    • #1408115: Remove-brick rebalance failed while rm -rf is in progress
                    • #1408131: Remove tests/distaf
                    • #1408395: [Arbiter] After Killing a brick writes drastically slow down
                    • #1408712: with granular-entry-self-heal enabled i see that there is a gfid mismatch and vm goes to paused state after migrating to another host
                    • #1408755: Remove tests/basic/rpm.t
                    • #1408757: Fix failure of split-brain-favorite-child-policy.t in CentOS7
                    • #1408758: tests/bugs/glusterd/bug-913555.t fails spuriously
                    • #1409078: RFE: Need a command to check op-version compatibility of clients
                    • #1409186: Dict_t leak in dht_migration_complete_check_task and dht_rebalance_inprogress_task
                    • #1409202: Warning messages throwing when EC volume offline brick comes up are difficult to understand for end user.
                    • #1409206: Extra lookup/fstats are sent over the network when a brick is down.
                    • #1409727: [ganesha + EC]posix compliance rename tests failed on EC volume with nfs-ganesha mount.
                    • #1409730: [ganesha+ec]: Contents of original file are not seen when hardlink is created
                    • #1410071: [Geo-rep] Geo replication status detail without master and slave volume args
                    • #1410313: brick crashed on systemic setup
                    • #1410355: Remove-brick rebalance failed while rm -rf is in progress
                    • #1410375: [Mdcache] clients being served wrong information about a file, can lead to file inconsistency
                    • #1410777: ganesha service crashed on all nodes of ganesha cluster on disperse volume when doing lookup while copying files remotely using scp
                    • #1410853: glusterfs-server should depend on firewalld-filesystem
                    • #1411607: [Geo-rep] If for some reason MKDIR failed to sync, it should not proceed further.
                    • #1411625: Spurious split-brain error messages are seen in rebalance logs
                    • #1411999: URL to Fedora distgit no longer uptodate
                    • #1412002: Examples/getvolfile.py is not pep8 compliant
                    • #1412069: No rollback of renames on succeeded subvols during failure
                    • #1412174: Memory leak on mount/fuse when setxattr fails
                    • #1412467: Remove tests/bugs/distribute/bug-1063230.t
                    • #1412489: Upcall: Possible memleak if inode_ctx_set fails
                    • #1412689: [Geo-rep] Slave mount log file is cluttered by logs of multiple active mounts
                    • #1412917: OOM kill of glusterfsd during continuous add-bricks
                    • #1412918: fuse: Resource leak in fuse-helper under GF_SOLARIS_HOST_OS
                    • #1413967: geo-rep session faulty with ChangelogException \"No such file or directory\"
                    • #1415226: packaging: python/python2(/python3) cleanup
                    • #1415245: core: max op version
                    • #1415279: libgfapi: remove/revert glfs_ipc() changes targeted for 4.0
                    • #1415581: RFE : Create trash directory only when its is enabled
                    • #1415915: RFE: An administrator friendly way to determine rebalance completion time
                    • #1415918: Cache security.ima xattrs as well
                    • #1416285: EXPECT_WITHIN is taking too much time even if the result matches with expected value
                    • #1416416: Improve output of \"gluster volume status detail\"
                    • #1417027: option performance.parallel-readdir should honor cluster.readdir-optimize
                    • #1417028: option performance.parallel-readdir can cause OOM in large volumes
                    • #1417042: glusterd restart is starting the offline shd daemon on other node in the cluster
                    • #1417135: [Stress] : SHD Logs flooded with \"Heal Failed\" messages,filling up \"/\" quickly
                    • #1417521: [SNAPSHOT] With all USS plugin enable .snaps directory is not visible in cifs mount as well as windows mount
                    • #1417527: glusterfind: After glusterfind pre command execution all temporary files and directories /usr/var/lib/misc/glusterfsd/glusterfind/// should be removed
                    • #1417804: debug/trace: Print iatts of individual entries in readdirp callback for better debugging experience
                    • #1418091: [RFE] Support multiple bricks in one process (multiplexing)
                    • #1418536: Portmap allocates way too much memory (256KB) on stack
                    • #1418541: [Ganesha+SSL] : Bonnie++ hangs during rewrites.
                    • #1418623: client process crashed due to write behind translator
                    • #1418650: Samba crash when mounting a distributed dispersed volume over CIFS
                    • #1418981: Unable to take Statedump for gfapi applications
                    • #1419305: disable client.io-threads on replica volume creation
                    • #1419306: [RFE] Need to have group cli option to set all md-cache options using a single command
                    • #1419503: [SAMBA-SSL] Volume Share hungs when multiple mount & unmount is performed over a windows client on a SSL enabled cluster
                    • #1419696: Fix spurious failure of ec-background-heal.t and tests/bitrot/bug-1373520.t
                    • #1419824: repeated operation failed warnings in gluster mount logs with disperse volume
                    • #1419825: Sequential and Random Writes are off target by 12% and 22% respectively on EC backed volumes over FUSE
                    • #1419846: removing warning related to enum, to let the build take place without errors for 3.10
                    • #1419855: [Remove-brick] Hardlink migration fails with \"lookup failed (No such file or directory)\" error messages in rebalance logs
                    • #1419868: removing old tier commands under the rebalance commands
                    • #1420606: glusterd is crashed at the time of stop volume
                    • #1420808: Trash feature improperly disabled
                    • #1420810: Massive xlator_t leak in graph-switch code
                    • #1420982: Automatic split brain resolution must check for all the bricks to be up to avoiding serving of inconsistent data(visible on x3 or more)
                    • #1420987: warning messages seen in glusterd logs while setting the volume option
                    • #1420989: when server-quorum is enabled, volume get returns 0 value for server-quorum-ratio
                    • #1420991: Modified volume options not synced once offline nodes comes up.
                    • #1421017: CLI option \"--timeout\" is accepting non numeric and negative values.
                    • #1421956: Disperse: Fallback to pre-compiled code execution when dynamic code generation fails
                    • #1422350: glustershd process crashed on systemic setup
                    • #1422363: [Replicate] \"RPC call decoding failed\" leading to IO hang & mount inaccessible
                    • #1422391: Gluster NFS server crashing in __mnt3svc_umountall
                    • #1422766: Entry heal messages in glustershd.log while no entries shown in heal info
                    • #1422777: DHT doesn't evenly balance files on FreeBSD with ZFS
                    • #1422819: [Geo-rep] Recreating geo-rep session with same slave after deleting with reset-sync-time fails to sync
                    • #1422942: Prevent reverse heal from happening
                    • #1423063: glusterfs-fuse RPM now depends on gfapi
                    • #1423070: Bricks not coming up when ran with address sanitizer
                    • #1423385: Crash in index xlator because of race in inode_ctx_set and inode_ref
                    • #1423406: Need to improve remove-brick failure message when the brick process is down.
                    • #1423412: Mount of older client fails
                    • #1423429: unnecessary logging in rda_opendir
                    • #1424921: dht_setxattr returns EINVAL when a file is deleted during the FOP
                    • #1424931: [RFE] Include few more options in virt file
                    • #1424937: multiple glusterfsd process crashed making the complete subvolume unavailable
                    • #1424973: remove-brick status shows 0 rebalanced files
                    • #1425556: glusterd log is flooded with stale disconnect rpc messages
                    • "},{"location":"release-notes/3.10.1/","title":"Release notes for Gluster 3.10.1","text":"

                       This is a bugfix release. The release notes for 3.10.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                      1. auth-allow setting was broken with 3.10 release and is now fixed (#1429117)
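
                       For context, a hedged example of the option concerned (the volume name and address pattern below are placeholders, not taken from the fix):

                           # sketch: restrict which client addresses may access the volume
                           gluster volume set <volname> auth.allow '192.168.1.*'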
                      "},{"location":"release-notes/3.10.1/#major-issues","title":"Major issues","text":"
                       1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                         • If you are using sharded volumes, DO NOT rebalance them till this is fixed (a quick check for sharding is sketched below).
                         • Status of this bug can be tracked here, #1426508
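
                       As a hedged aside (the volume name below is a placeholder), one quick way to check whether a volume has sharding enabled before planning any rebalance is:

                           # sketch: the reported value is "on" when sharding is enabled
                           gluster volume get <volname> features.shard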
                      "},{"location":"release-notes/3.10.1/#bugs-addressed","title":"Bugs addressed","text":"

                      A total of 31 patches have been merged, addressing 26 bugs:

                      • #1419824: repeated operation failed warnings in gluster mount logs with disperse volume
                      • #1422769: brick process crashes when glusterd is restarted
                      • #1422781: Transport endpoint not connected error seen on client when glusterd is restarted
                      • #1426222: build: fixes to build 3.9.0rc2 on Debian (jessie)
                      • #1426323: common-ha: no need to remove nodes one-by-one in teardown
                      • #1426329: [Ganesha] : Add comment to Ganesha HA config file ,about cluster name's length limitation
                      • #1427387: systemic testing: seeing lot of ping time outs which would lead to splitbrains
                      • #1427399: [RFE] capture portmap details in glusterd's statedump
                      • #1427461: Bricks take up new ports upon volume restart after add-brick op with brick mux enabled
                      • #1428670: Disconnects in nfs mount leads to IO hang and mount inaccessible
                      • #1428739: Fix crash in dht resulting from tests/features/nuke.t
                      • #1429117: auth failure after upgrade to GlusterFS 3.10
                      • #1429402: Restore atime/mtime for symlinks and other non-regular files.
                      • #1429773: disallow increasing replica count for arbiter volumes
                      • #1430512: /libgfxdr.so.0.0.1: undefined symbol: __gf_free
                      • #1430844: build/packaging: Debian and Ubuntu don't have /usr/libexec/; results in bad packages
                      • #1431175: volume start command hangs
                      • #1431176: USS is broken when multiplexing is on
                      • #1431591: memory leak in features/locks xlator
                      • #1434296: [Disperse] Metadata version is not healing when a brick is down
                      • #1434303: Move spit-brain msg in read txn to debug
                      • #1434399: glusterd crashes when peering an IP where the address is more than acceptable range (>255) OR with random hostnames
                      • #1435946: When parallel readdir is enabled and there are simultaneous readdir and disconnects, then it results in crash
                      • #1436203: Undo pending xattrs only on the up bricks
                      • #1436411: Unrecognized filesystems (i.e. btrfs, zfs) log many errors about \"getinode size\"
                      • #1437326: Sharding: Fix a performance bug
                      "},{"location":"release-notes/3.10.10/","title":"Release notes for Gluster 3.10.10","text":"

                      This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6, 3.10.7, 3.10.8 and 3.10.9 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.10/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                      No Major changes

                      "},{"location":"release-notes/3.10.10/#major-issues","title":"Major issues","text":"
                      1. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.
                      "},{"location":"release-notes/3.10.10/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.9 are listed below.

                      • #1498081: dht_(f)xattrop does not implement migration checks
                      • #1534848: entries not getting cleared post healing of softlinks (stale entries showing up in heal info)
                      "},{"location":"release-notes/3.10.11/","title":"Release notes for Gluster 3.10.11","text":"

                      This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6, 3.10.7, 3.10.8, 3.10.9 and 3.10.10 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.11/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                      No Major changes

                      "},{"location":"release-notes/3.10.11/#major-issues","title":"Major issues","text":"
                      1. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.
                      "},{"location":"release-notes/3.10.11/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.10 are listed below.

                      • #1486542: \"ganesha.so cannot open\" warning message in glusterd log in non ganesha setup.
                      • #1544461: 3.8 -> 3.10 rolling upgrade fails (same for 3.12 or 3.13) on Ubuntu 14
                      • #1544787: tests/bugs/cli/bug-1169302.t fails spuriously
                      • #1546912: tests/bugs/posix/bug-990028.t fails in release-3.10 branch
                      • #1549482: Quota: After deleting directory from mount point on which quota was configured, quota list command output is blank
                      "},{"location":"release-notes/3.10.12/","title":"Release notes for Gluster 3.10.12","text":"

                      This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6, 3.10.7, 3.10.8, 3.10.9, 3.10.10 and 3.10.11 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.12/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                       This release contains a fix for a security vulnerability in Gluster, as follows:

                      • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1088
                      • https://nvd.nist.gov/vuln/detail/CVE-2018-1088

                       Installing the updated packages and restarting gluster services will update the Gluster shared storage volume volfiles, which are more secure than the defaults currently in place.

                       Further, for increased security, the Gluster shared storage volume can be TLS-enabled, and access to it restricted using the auth.ssl-allow option. See this guide for more details.
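
                       As a hedged sketch (the certificate common names below are examples, not taken from this release note), TLS-enabling the shared storage volume and restricting access could look roughly like this:

                           # sketch: enable TLS on the client and server (brick) connections of the
                           # shared storage volume, then allow only the listed certificate CNs
                           gluster volume set gluster_shared_storage client.ssl on
                           gluster volume set gluster_shared_storage server.ssl on
                           gluster volume set gluster_shared_storage auth.ssl-allow 'server1,server2,server3'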

                      "},{"location":"release-notes/3.10.12/#major-issues","title":"Major issues","text":"
                      1. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.
                      "},{"location":"release-notes/3.10.12/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.11 are listed below.

                      • #1553777: /var/log/glusterfs/bricks/export_vdb.log flooded with this error message \"Not able to add to index [Too many links]\"
                      • #1555195: [Ganesha] Duplicate volume export entries in ganesha.conf causing volume unexport to fail
                      • #1555203: After a replace brick command, self-heal takes some time to start healing files on disperse volumes
                      • #1557304: [Glusterd] Volume operations fail on a (tiered) volume because of a stale lock held by one of the nodes
                      • #1559352: [Ganesha] : Ganesha crashes while cluster enters failover/failback mode
                      • #1561732: Rebalance failures on a dispersed volume with lookup-optimize enabled
                      • #1563500: nfs-ganesha: in case pcs cluster setup fails then nfs-ganesha process should not start
                      • #1569409: EIO errors on some operations when volume has mixed brick versions on a disperse volume
                      • #1570428: CVE-2018-1088 glusterfs: Privilege escalation via gluster_shared_storage when snapshot scheduling is enabled [fedora-all]
                      "},{"location":"release-notes/3.10.2/","title":"Release notes for Gluster 3.10.2","text":"

                       This is a bugfix release. The release notes for 3.10.0 and 3.10.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                       1. Many brick multiplexing and nfs-ganesha + HA bugs have been addressed.
                      2. Rebalance and remove brick operations have been disabled for sharded volumes to prevent data corruption.
                      "},{"location":"release-notes/3.10.2/#major-issues","title":"Major issues","text":"
                       1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                         • Status of this bug can be tracked here, #1426508
                      "},{"location":"release-notes/3.10.2/#bugs-addressed","title":"Bugs addressed","text":"

                      A total of 63 patches have been merged, addressing 46 bugs:

                      • #1437854: Spellcheck issues reported during Debian build
                      • #1425726: Stale export entries in ganesha.conf after executing \"gluster nfs-ganesha disable\"
                      • #1427079: [Ganesha] : unexport fails if export configuration file is not present
                      • #1440148: common-ha (debian/ubuntu): ganesha-ha.sh has a hard-coded /usr/libexec/ganesha...
                      • #1443478: RFE: Support to update NFS-Ganesha export options dynamically
                      • #1443490: [Nfs-ganesha] Refresh config fails when ganesha cluster is in failover mode.
                      • #1441474: synclocks don't work correctly under contention
                      • #1449002: [Brick Multiplexing] : Bricks for multiple volumes going down after glusterd restart and not coming back up after volume start force
                      • #1438813: Segmentation fault when creating a qcow2 with qemu-img
                      • #1438423: [Ganesha + EC] : Input/Output Error while creating LOTS of smallfiles
                      • #1444540: rm -rf \\<dir> returns ENOTEMPTY even though ls on the mount point returns no files
                      • #1446227: Incorrect and redundant logs in the DHT rmdir code path
                      • #1447608: Don't allow rebalance/fix-layout operation on sharding enabled volumes till dht+sharding bugs are fixed
                      • #1448864: Seeing error \"Failed to get the total number of files. Unable to estimate time to complete rebalance\" in rebalance logs
                      • #1443349: [Eventing]: Unrelated error message displayed when path specified during a 'webhook-test/add' is missing a schema
                      • #1441576: [geo-rep]: rsync should not try to sync internal xattrs
                      • #1441927: [geo-rep]: Worker crashes with [Errno 16] Device or resource busy: '.gfid/00000000-0000-0000-0000-000000000001/dir.166 while renaming directories
                      • #1401877: [GANESHA] Symlinks from /etc/ganesha/ganesha.conf to shared_storage are created on the non-ganesha nodes in 8 node gluster having 4 node ganesha cluster
                      • #1425723: nfs-ganesha volume export file remains stale in shared_storage_volume when volume is deleted
                      • #1427759: nfs-ganesha: Incorrect error message returned when disable fails
                      • #1438325: Need to improve remove-brick failure message when the brick process is down.
                      • #1438338: glusterd is setting replicate volume property over disperse volume or vice versa
                      • #1438340: glusterd is not validating for allowed values while setting \"cluster.brick-multiplex\" property
                      • #1441476: Glusterd crashes when restarted with many volumes
                      • #1444128: [BrickMultiplex] gluster command not responding and .snaps directory is not visible after executing snapshot related command
                      • #1445260: [GANESHA] Volume start and stop having ganesha enable on it,turns off cache-invalidation on volume
                      • #1445408: gluster volume stop hangs
                      • #1449934: Brick Multiplexing :- resetting a brick bring down other bricks with same PID
                      • #1435779: Inode ref leak on anonymous reads and writes
                      • #1440278: [GSS] NFS Sub-directory mount not working on solaris10 client
                      • #1450378: GNFS crashed while taking lock on a file from 2 different clients having same volume mounted from 2 different servers
                      • #1449779: quota: limit-usage command failed with error \" Failed to start aux mount\"
                      • #1450564: glfsheal: crashed(segfault) with disperse volume in RDMA
                      • #1443501: Don't wind post-op on a brick where the fop phase failed.
                      • #1444892: When either killing or restarting a brick with performance.stat-prefetch on, stat sometimes returns a bad st_size value.
                      • #1449169: Multiple bricks WILL crash after TCP port probing
                      • #1440805: Update rfc.sh to check Change-Id consistency for backports
                      • #1443010: snapshot: snapshots appear to be failing with respect to secure geo-rep slave
                      • #1445209: snapshot: Unable to take snapshot on a geo-replicated volume, even after stopping the session
                      • #1444773: explicitly specify executor to be bash for tests
                      • #1445407: remove bug-1421590-brick-mux-reuse-ports.t
                      • #1440742: Test files clean up for tier during 3.10
                      • #1448790: [Tiering]: High and low watermark values when set to the same level, is allowed
                      • #1435942: Enabling parallel-readdir causes dht linkto files to be visible on the mount,
                      • #1437763: File-level WORM allows ftruncate() on read-only files
                      • #1439148: Parallel readdir on Gluster NFS displays less number of dentries
                      "},{"location":"release-notes/3.10.3/","title":"Release notes for Gluster 3.10.3","text":"

                       This is a bugfix release. The release notes for 3.10.0, 3.10.1 and 3.10.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.3/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                      1. No Major changes
                      "},{"location":"release-notes/3.10.3/#major-issues","title":"Major issues","text":"
                      1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                        • Status of this bug can be tracked here, #1426508
                      2. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.

                      "},{"location":"release-notes/3.10.3/#bugs-addressed","title":"Bugs addressed","text":"

                      A total of 18 patches have been merged, addressing 13 bugs:

                      • #1450053: [GANESHA] Adding a node to existing cluster failed to start pacemaker service on new node
                      • #1450773: Quota: After upgrade from 3.7 to higher version , gluster quota list command shows \"No quota configured on volume repvol\"
                      • #1450934: [New] - Replacing an arbiter brick while I/O happens causes vm pause
                       • #1450947: Autoconf leaves unexpanded variables in path names of non-shell-script text files
                      • #1451371: crash in dht_rmdir_do
                      • #1451561: AFR returns the node uuid of the same node for every file in the replica
                      • #1451587: cli xml status of detach tier broken
                      • #1451977: Add logs to identify whether disconnects are voluntary or due to network problems
                      • #1451995: Log message shows error code as success even when rpc fails to connect
                      • #1453056: [DHt] : segfault in dht_selfheal_dir_setattr while running regressions
                      • #1453087: Brick Multiplexing: On reboot of a node Brick multiplexing feature lost on that node as multiple brick processes get spawned
                      • #1456682: tierd listens to a port.
                      • #1457054: glusterfs client crash on io-cache.so(__ioc_page_wakeup+0x44)
                      "},{"location":"release-notes/3.10.4/","title":"Release notes for Gluster 3.10.4","text":"

                       This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2 and 3.10.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.4/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                      1. No Major changes
                      "},{"location":"release-notes/3.10.4/#major-issues","title":"Major issues","text":"
                      1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                        • Status of this bug can be tracked here, #1426508
                      2. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.

                      3. Another rebalance related bug is being worked upon #1467010
                      "},{"location":"release-notes/3.10.4/#bugs-addressed","title":"Bugs addressed","text":"

                      A total of 18 patches have been merged, addressing 13 bugs:

                      • #1457732: \"split-brain observed [Input/output error]\" error messages in samba logs during parallel rm -rf
                      • #1459760: Glusterd segmentation fault in ' _Unwind_Backtrace' while running peer probe
                      • #1460649: posix-acl: Whitelist virtual ACL xattrs
                      • #1460914: Rebalance estimate time sometimes shows negative values
                      • #1460993: Revert CLI restrictions on running rebalance in VM store use case
                      • #1461019: [Ganesha] : Grace period is not being adhered to on RHEL 7.4; Clients continue running IO even during grace.
                      • #1462080: [Bitrot]: Inconsistency seen with 'scrub ondemand' - fails to trigger scrub
                      • #1463623: [Ganesha]Bricks got crashed while running posix compliance test suit on V4 mount
                       • #1463641: [Ganesha] Ganesha service failed to start on new node added in existing ganesha cluster
                      • #1464078: with AFR now making both nodes to return UUID for a file will result in georep consuming more resources
                      • #1466852: assorted typos and spelling mistakes from Debian lintian
                      • #1466863: dht_rename_lock_cbk crashes in upstream regression test
                      • #1467269: Heal info shows incorrect status
                      "},{"location":"release-notes/3.10.5/","title":"Release notes for Gluster 3.10.5","text":"

                       This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3 and 3.10.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.5/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                      No Major changes

                      "},{"location":"release-notes/3.10.5/#major-issues","title":"Major issues","text":"
                      1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                        • The last known cause for corruption #1467010 has a fix with this release. As further testing is still in progress, the issue is retained as a major issue.
                      2. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.

                      "},{"location":"release-notes/3.10.5/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.4 are listed below.

                      • #1467010: Fd based fops fail with EBADF on file migration
                      • #1468126: disperse seek does not correctly handle the end of file
                       • #1468198: [Geo-rep]: entry failed to sync to slave with ENOENT error
                      • #1470040: packaging: Upgrade glusterfs-ganesha sometimes fails to semanage ganesha_use_fusefs
                      • #1470488: gluster volume status --xml fails when there are 100 volumes
                      • #1471028: glusterfs process leaking memory when error occurs
                      • #1471612: metadata heal not happening despite having an active sink
                      • #1471870: cthon04 can cause segfault in gNFS/NLM
                      • #1471917: [GANESHA] Ganesha setup creation fails due to selinux blocking some services required for setup creation
                      • #1472446: packaging: save ganesha config files in (/var)/run/gluster/shared_storage/nfs-ganesha
                      • #1473129: dht/rebalance: Improve rebalance crawl performance
                      • #1473132: dht/cluster: rebalance/remove-brick should honor min-free-disk
                      • #1473133: dht/cluster: rebalance/remove-brick should honor min-free-disk
                      • #1473134: The rebal-throttle setting does not work as expected
                      • #1473136: rebalance: Allow admin to change thread count for rebalance
                      • #1473137: dht: Make throttle option \"normal\" value uniform across dht_init and dht_reconfigure
                      • #1473140: Fix on demand file migration from client
                      • #1473141: cluster/dht: Fix hardlink migration failures
                      • #1475638: [Scale] : Client logs flooded with \"inode context is NULL\" error messages
                      • #1476212: [geo-rep]: few of the self healed hardlinks on master did not sync to slave
                      • #1478498: scripts: invalid test in S32gluster_enable_shared_storage.sh
                      • #1478499: packaging: /var/lib/glusterd/options should be %config(noreplace)
                      • #1480594: nfs process crashed in \"nfs3_getattr\"
                      "},{"location":"release-notes/3.10.6/","title":"Release notes for Gluster 3.10.6","text":"

                       This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4 and 3.10.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.6/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                      No Major changes

                      "},{"location":"release-notes/3.10.6/#major-issues","title":"Major issues","text":"
                      1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                        • The last known cause for corruption #1498081 is still pending, and not yet a part of this release.
                      2. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.

                      "},{"location":"release-notes/3.10.6/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.5 are listed below.

                      • #1467010: Fd based fops fail with EBADF on file migration
                      • #1481394: libgfapi: memory leak in glfs_h_acl_get
                      • #1482857: glusterd fails to start
                      • #1483997: packaging: use rdma-core(-devel) instead of ibverbs, rdmacm; disable rdma on armv7hl
                      • #1484443: packaging: /run and /var/run; prefer /run
                      • #1486542: \"ganesha.so cannot open\" warning message in glusterd log in non ganesha setup.
                      • #1487042: AFR returns the node uuid of the same node for every file in the replica
                      • #1487647: with AFR now making both nodes to return UUID for a file will result in georep consuming more resources
                      • #1488391: gluster-blockd process crashed and core generated
                      • #1488719: [RHHI] cannot boot vms created from template when disk format = qcow2
                      • #1490909: [Ganesha] : Unable to bring up a Ganesha HA cluster on SELinux disabled machines on latest gluster bits.
                      • #1491166: GlusterD returns a bad memory pointer in glusterd_get_args_from_dict()
                      • #1491691: rpc: TLSv1_2_method() is deprecated in OpenSSL-1.1
                      • #1491966: AFR entry self heal removes a directory's .glusterfs symlink.
                      • #1491985: Add NULL gfid checks before creating file
                      • #1491995: afr: check op_ret value in __afr_selfheal_name_impunge
                      • #1492010: Launch metadata heal in discover code path.
                      • #1495430: Make event-history feature configurable and have it disabled by default
                      • #1496321: [afr] split-brain observed on T files post hardlink and rename in x3 volume
                      • #1497122: Crash in dht_check_and_open_fd_on_subvol_task()
                      "},{"location":"release-notes/3.10.7/","title":"Release notes for Gluster 3.10.7","text":"

                       This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5 and 3.10.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.7/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                      No Major changes

                      "},{"location":"release-notes/3.10.7/#major-issues","title":"Major issues","text":"
                      1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                        • The last known cause for corruption #1498081 is still pending, and not yet a part of this release.
                      2. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.

                      "},{"location":"release-notes/3.10.7/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.6 are listed below.

                      • #1480788: File-level WORM allows mv over read-only files
                      • #1491059: PID File handling: brick pid file leaves stale pid and brick fails to start when glusterd is started
                      • #1496321: [afr] split-brain observed on T files post hardlink and rename in x3 volume
                      • #1497990: Gluster 3.10.x Packages require manual systemctl daemon reload after install
                      • #1499890: md-cache uses incorrect xattr keynames for GF_POSIX_ACL keys
                      • #1499893: md-cache: xattr values should not be checked with string functions
                      • #1501955: gfapi: API needed to set lk_owner
                      • #1502928: Mishandling null check at send_brick_req of glusterfsd/src/gf_attach.c
                      • #1503405: Potential use of NULL this variable before it gets initialized
                      "},{"location":"release-notes/3.10.8/","title":"Release notes for Gluster 3.10.8","text":"

                       This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6 and 3.10.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.8/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                      No Major changes

                      "},{"location":"release-notes/3.10.8/#major-issues","title":"Major issues","text":"
                      1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                        • The last known cause for corruption #1498081 is still pending, and not yet a part of this release.
                      2. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.

                      "},{"location":"release-notes/3.10.8/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.7 are listed below.

                      • #1507749: clean up port map on brick disconnect
                      • #1507752: Brick port mismatch
                      • #1507880: reset-brick commit force failed with glusterd_volume_brickinfo_get Returning -1
                      • #1508036: Address lstat usage in glusterd-snapshot.c code
                      • #1514388: default timeout of 5min not honored for analyzing split-brain files post setfattr replica.split-brain-heal-finalize
                      • #1514424: gluster volume splitbrain info needs to display output of each brick in a stream fashion instead of buffering and dumping at the end
                      • #1517682: Memory leak in locks xlator
                      "},{"location":"release-notes/3.10.9/","title":"Release notes for Gluster 3.10.9","text":"

                      This is a bugfix release. The release notes for 3.10.0, 3.10.1, 3.10.2, 3.10.3, 3.10.4, 3.10.5, 3.10.6, 3.10.7 and 3.10.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.10 stable release.

                      "},{"location":"release-notes/3.10.9/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                      No Major changes

                      "},{"location":"release-notes/3.10.9/#major-issues","title":"Major issues","text":"
                      1. Expanding a gluster volume that is sharded may cause file corruption

                         • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                        • The last known cause for corruption #1498081 is still pending, and not yet a part of this release.
                      2. Brick multiplexing is being tested and fixed aggressively but we still have a few crashes and memory leaks to fix.

                      "},{"location":"release-notes/3.10.9/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.8 are listed below.

                      • #1523050: glusterd consuming high memory
                      • #1529086: fstat returns ENOENT/ESTALE
                      • #1529089: opening a file that is destination of rename results in ENOENT errors
                      • #1529096: /usr/sbin/glusterfs crashing on Red Hat OpenShift Container Platform node
                      • #1530341: [snapshot cifs]ls on .snaps directory is throwing input/output error over cifs mount
                      • #1530450: glustershd fails to start on a volume force start after a brick is down
                      "},{"location":"release-notes/3.11.0/","title":"Release notes for Gluster 3.11.0","text":"

This is a major Gluster release that includes some substantial changes. The features revolve around improvements to small-file workloads, SELinux support, a Halo replication enhancement from Facebook, and some usability and performance improvements, among other bug fixes.

                      The most notable features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

                      "},{"location":"release-notes/3.11.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/3.11.0/#switched-to-storhaug-for-ganesha-and-samba-high-availability","title":"Switched to storhaug for ganesha and samba high availability","text":"

                      Notes for users:

                      High Availability (HA) support for NFS-Ganesha (NFS) and Samba (SMB) is managed by Storhaug. Like the old HA implementation, Storhaug uses Pacemaker and Corosync to manage Virtual (floating) IP addresses (VIPs) and fencing. See https://github.com/linux-ha-storage/storhaug.

                      Storhaug packages are available in Fedora and for several popular Linux distributions from https://download.gluster.org/pub/gluster/storhaug/

Note: Storhaug does not dictate which fencing solution should be used. There are many to choose from in most popular Linux distributions. Choose the one that best fits your environment and use it.

                      "},{"location":"release-notes/3.11.0/#added-selinux-support-for-gluster-volumes","title":"Added SELinux support for Gluster Volumes","text":"

                      Notes for users:

                      A new xlator has been introduced (features/selinux) to allow setting the extended attribute (security.selinux) that is needed to support SELinux on Gluster volumes. The current ability to enforce the SELinux policy on the Gluster Storage servers prevents setting the extended attribute for use on the client side. The new translator converts the client-side SELinux extended attribute to a Gluster internal representation (the trusted.glusterfs.selinux extended attribute) to prevent problems.

                      This feature is intended to be the base for implementing Labelled-NFS in NFS-Ganesha and SELinux support for FUSE mounts in the Linux kernel.

                      Limitations:

                      • The Linux kernel does not support mounting of FUSE filesystems with SELinux support, yet.
                      • NFS-Ganesha does not support Labelled-NFS, yet.

                      Known Issues:

• There has been limited testing, because other projects cannot consume the functionality until it is part of a release. So far, no problems have been observed, but this might change when other projects start to use this seriously.
                      "},{"location":"release-notes/3.11.0/#several-memory-leaks-are-fixed-in-gfapi-during-graph-switches","title":"Several memory leaks are fixed in gfapi during graph switches","text":"

                      Notes for users:

Gluster API (or gfapi) has had a few memory leak issues arising specifically during changes to volume graphs (volume topology or options). A few of these are addressed in this release, and more work towards ironing out the pending leaks is planned across the next few releases.

                      Limitations:

                      • There are still a few leaks to be addressed when graph switches occur
                      "},{"location":"release-notes/3.11.0/#get-state-cli-is-enhanced-to-provide-client-and-brick-capacity-related-information","title":"get-state CLI is enhanced to provide client and brick capacity related information","text":"

                      Notes for users:

The get-state CLI output can now optionally include client-related information for locally running bricks, as obtained from gluster volume status <volname>|all clients. Getting the client details is a relatively costly operation, so these details are only added to the output if the get-state command is invoked with the detail option. The following is the updated usage for the get-state command:

                       # gluster get-state [<daemon>] [[odir </path/to/output/dir/>] [file <filename>]] [detail]\n

                      Other than client details, capacity related information for respective local bricks as obtained from gluster volume status <volname>|all detail has also been added to the get-state output.
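As an illustration, a detailed state dump of glusterd could be requested as below; the output directory and file name shown here are placeholders, not defaults:

 # gluster get-state glusterd odir /var/run/gluster/ file glusterd_state detail\n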

                      Limitations:

• Information for non-local bricks and clients connected to non-local bricks won't be available. This is a known limitation of the get-state command, since it only reports on local bricks.
                      "},{"location":"release-notes/3.11.0/#ability-to-serve-negative-lookups-from-cache-has-been-added","title":"Ability to serve negative lookups from cache has been added","text":"

                      Notes for users:

Before creating or renaming any file, lookups (around 5-6 when using the SMB protocol) are sent to verify whether the file already exists. The negative lookup cache serves these lookups from the cache when possible, thus improving create/rename performance when accessing a Gluster volume over SMB.

                      Execute the following commands to enable negative-lookup cache:

# gluster volume set <volname> features.cache-invalidation on\n# gluster volume set <volname> features.cache-invalidation-timeout 600\n# gluster volume set <volname> nl-cache on\n

                      Limitations

• For this release, this feature is supported only for SMB access.
                      "},{"location":"release-notes/3.11.0/#new-xlator-to-help-developers-detecting-resource-leaks-has-been-added","title":"New xlator to help developers detecting resource leaks has been added","text":"

                      Notes for users:

                      This is intended as a developer feature, and hence there is no direct user impact.

                      For developers, the sink xlator provides ways to help detect memory leaks in gfapi and any xlator in between the API and the sink xlator.

More details can be found in this thread on the gluster-devel mailing list.

                      "},{"location":"release-notes/3.11.0/#feature-for-metadata-cachingsmall-file-performance-is-production-ready","title":"Feature for metadata-caching/small file performance is production ready","text":"

                      Notes for users:

Over the course of several releases, fixes and enhancements have been made to the mdcache xlator to improve the performance of small-file workloads. As a result, with this release this feature is being announced as production ready.

                      In order to improve the performance of directory operations of Gluster volumes, the maximum metadata (stat, xattr) caching time on the client side is increased to 10 minutes, without compromising on the consistency of the cache. Significant performance improvements can be achieved in the following workloads on FUSE and SMB access, by enabling metadata caching:

                      • Listing of directories (recursive)
                      • Creating files
                      • Deleting files
                      • Renaming files

                      To enable metadata caching execute the following commands:

# gluster volume set <volname> group metadata-cache\n# gluster volume set <volname> network.inode-lru-limit <n>\n

<n> is set to 50000 by default. It should be increased if the number of concurrently accessed files in the volume is very high. Increasing this number increases the memory footprint of the brick processes.
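As an illustration, on a volume where very many files are accessed concurrently, the limit could be raised along the following lines (the value shown is an assumption for illustration, not a recommendation):

 # gluster volume set <volname> network.inode-lru-limit 200000\n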

                      "},{"location":"release-notes/3.11.0/#parallel-readdir-feature-introduced-in-3100-is-production-ready","title":"\"Parallel Readdir\" feature introduced in 3.10.0 is production ready","text":"

                      Notes for users:

                      This feature was introduced in 3.10 and was experimental in nature. Over the course of 3.10 minor releases and 3.11.0 release, this feature has been stabilized and is ready for use in production environments.

                      For further details refer: 3.10.0 release notes
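The feature is controlled by the performance.parallel-readdir volume option; a minimal sketch of enabling it on a volume, assuming it was not already turned on under 3.10:

 # gluster volume set <volname> performance.parallel-readdir on\n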

                      "},{"location":"release-notes/3.11.0/#object-versioning-is-enabled-only-if-bitrot-is-enabled","title":"Object versioning is enabled only if bitrot is enabled","text":"

                      Notes for users:

Object versioning was turned on by default on brick processes by the bitrot xlator. This caused additional extended attributes to be set and looked up on the backend file system for every object, even when bitrot was not actively in use, which at times caused high CPU utilization on the brick processes.

To fix this, object versioning is disabled by default and is only enabled as part of enabling the bitrot option.
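In other words, setting and looking up of the versioning xattrs now happens only once bitrot is enabled on a volume, for example via the bitrot CLI (a minimal sketch):

 # gluster volume bitrot <volname> enable\n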

                      "},{"location":"release-notes/3.11.0/#distribute-layer-provides-more-robust-transactions-during-directory-namespace-operations","title":"Distribute layer provides more robust transactions during directory namespace operations","text":"

                      Notes for users:

The distribute layer in Gluster creates and maintains directories on all subvolumes; as a result, operations involving creation/manipulation/deletion of these directories needed better transaction support to ensure consistency of the file system.

This transaction support is now implemented in the distribute layer, thus ensuring better consistency of the file system as a whole when dealing with racing operations on the same directory object.

                      "},{"location":"release-notes/3.11.0/#gfapi-extended-readdirplus-api-has-been-added","title":"gfapi extended readdirplus API has been added","text":"

                      Notes for users:

An extended readdirplus API, glfs_xreaddirplus, is added to get extra information along with readdirplus results on demand. This is useful for applications (like NFS-Ganesha, which needs handles) that want to retrieve more information along with stat in a single call, thus improving the performance of workloads involving directory listing.

                      The API syntax and usage can be found in glfs.h header file.

                      Limitations:

                      • This API currently has support to only return stat and handles (glfs_object) for each dirent of the directory, but can be extended in the future.
                      "},{"location":"release-notes/3.11.0/#improved-adoption-of-standard-refcounting-functions-across-the-code","title":"Improved adoption of standard refcounting functions across the code","text":"

                      Notes for users:

This change does not impact users. It is an internal code cleanup activity that ensures we reference count in a standard manner, thus avoiding unwanted bugs due to differing implementations of the same functionality.

                      Known Issues:

                      • This standardization started with this release and is expected to continue across releases.
                      "},{"location":"release-notes/3.11.0/#performance-improvements-to-rebalance-have-been-made","title":"Performance improvements to rebalance have been made","text":"

                      Notes for users:

Both the crawling and migration phases of rebalance have been improved. The crawler is now optimized to split the migration load across replica and EC nodes. Prior to this change, if the replicating bricks were distributed over two nodes, only one node performed the migration; with the new optimization both nodes divide the load between them, boosting migration performance. There have also been optimizations to avoid redundant network operations (RPC calls) while migrating a file.

Further, file migration now avoids the syncop framework and is managed entirely by rebalance threads, giving an additional performance boost.

The throttle settings for rebalance have also changed. Earlier, users could choose between three values (\"lazy\", \"normal\", and \"aggressive\"), which was not flexible enough. To overcome this, number-based throttle settings have been introduced: users can now set a number indicating how many threads the rebalance process will work with, which translates to the number of files being migrated in parallel.
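A sketch of the number-based throttle setting, assuming the cluster.rebal-throttle volume option referenced elsewhere in these notes (the value shown is illustrative):

 # gluster volume set <volname> cluster.rebal-throttle 5\n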

                      "},{"location":"release-notes/3.11.0/#halo-replication-feature-in-afr-has-been-introduced","title":"Halo Replication feature in AFR has been introduced","text":"

                      Notes for users:

Halo Geo-replication is a feature that allows Gluster or NFS clients to write locally to their region (as defined by a latency \"halo\" or threshold, if you like), and have their writes asynchronously propagate from their origin to the rest of the cluster. Clients can also write synchronously to the cluster simply by specifying a halo-latency that is very large (e.g. 10 seconds), which will include all bricks. To enable the halo feature, execute the following command:

# gluster volume set <volname> cluster.halo-enabled yes\n

You may have to set the following options to change the defaults. cluster.halo-shd-latency: The threshold below which self-heal daemons will consider children (bricks) connected.

                      cluster.halo-nfsd-latency: The threshold below which NFS daemons will consider children (bricks) connected.

                      cluster.halo-latency: The threshold below which all other clients will consider children (bricks) connected.

                      cluster.halo-min-replicas: The minimum number of replicas which are to be enforced regardless of latency specified in the above 3 options. If the number of children falls below this threshold the next best (chosen by latency) shall be swapped in.
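For instance, the halo latency threshold could be tuned as below; the value shown is an illustrative assumption:

 # gluster volume set <volname> cluster.halo-latency 10\n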

                      "},{"location":"release-notes/3.11.0/#fallocate-support-with-ec","title":"FALLOCATE support with EC","text":"

                      Notes for users

Support for the FALLOCATE file operation on EC volumes is added with this release. EC volumes now support basic FALLOCATE functionality.

                      "},{"location":"release-notes/3.11.0/#self-heal-window-size-control-option-for-ec","title":"Self-heal window-size control option for EC","text":"

                      Notes for users

Support to control the maximum size of read/write operations carried out during the self-heal process has been added with this release. Users can tune the disperse.self-heal-window-size option on a disperse volume to adjust this size.
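A minimal sketch of tuning this option (the value shown is an illustrative assumption, not a recommended default):

 # gluster volume set <volname> disperse.self-heal-window-size 2\n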

                      "},{"location":"release-notes/3.11.0/#major-issues","title":"Major issues","text":"
                      1. Expanding a gluster volume that is sharded may cause file corruption

• Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
• Status of this bug can be tracked here, #1426508
• The latest series of fixes for the issue (which are present in this release as well) have not shown the previous corruption, hence the fixes look good, but the issue is nevertheless kept on the watch list.
                      "},{"location":"release-notes/3.11.0/#bugs-addressed","title":"Bugs addressed","text":"

                      Bugs addressed since release-3.10.0 are listed below.

                      • #1169302: Unable to take Statedump for gfapi applications
                      • #1197308: do not depend on \"killall\", use \"pkill\" instead
                      • #1198849: Minor improvements and cleanup for the build system
                      • #1257792: bug-1238706-daemons-stop-on-peer-cleanup.t fails occasionally
                      • #1261689: geo-replication faulty
                      • #1264849: RFE : Create trash directory only when its is enabled
                      • #1297182: Mounting with \"-o noatime\" or \"-o noexec\" causes \"nosuid,nodev\" to be set as well
                      • #1318100: RFE : SELinux translator to support setting SELinux contexts on files in a glusterfs volume
                      • #1321578: auth.allow and auth.reject not working host mentioned with hostnames/FQDN
                      • #1322145: Glusterd fails to restart after replacing a failed GlusterFS node and a volume has a snapshot
                      • #1326219: Make Gluster/NFS an optional component
                      • #1328342: [tiering]: gluster v reset of watermark levels can allow low watermark level to have a higher value than hi watermark level
                      • #1353952: [geo-rep]: rsync should not try to sync internal xattrs
                      • #1356076: DHT doesn't evenly balance files on FreeBSD with ZFS
                      • #1359599: BitRot :- bit-rot.signature and bit-rot.version xattr should not be set if bitrot is not enabled on volume
                      • #1369393: dead loop in changelog_rpc_server_destroy
                      • #1383893: glusterd restart is starting the offline shd daemon on other node in the cluster
                      • #1384989: libglusterfs : update correct memory segments in glfs-message-id
                      • #1385758: [RFE] Support multiple bricks in one process (multiplexing)
                      • #1386578: mounting with rdma protocol fails for tcp,rdma volumes
                      • #1389127: build: fixes to build 3.9.0rc2 on Debian (jessie)
                      • #1390050: Elasticsearch get CorruptIndexException errors when running with GlusterFS persistent storage
                      • #1393338: Rebalance should skip the file if the file has hardlinks instead of failing
                      • #1395643: [SELinux] [Scheduler]: Unable to create Snapshots on RHEL-7.1 using Scheduler
                      • #1396004: RFE: An administrator friendly way to determine rebalance completion time
                      • #1399196: use attribute(format(printf)) to catch format string errors at compile time
                      • #1399593: Obvious typo in cleanup code in rpc_clnt_notify
                      • #1401571: bitrot quarantine dir misspelled
                      • #1401812: RFE: Make readdirp parallel in dht
                      • #1401877: [GANESHA] Symlinks from /etc/ganesha/ganesha.conf to shared_storage are created on the non-ganesha nodes in 8 node gluster having 4 node ganesha cluster
                      • #1402254: compile warning unused variable
                      • #1402661: Samba crash when mounting a distributed dispersed volume over CIFS
                      • #1404424: The data-self-heal option is not honored in AFR
                      • #1405628: Socket search code at startup is slow
                      • #1408809: [Perf] : significant Performance regression seen with disperse volume when compared with 3.1.3
                      • #1409191: Sequential and Random Writes are off target by 12% and 22% respectively on EC backed volumes over FUSE
                      • #1410425: [GNFS+EC] Cthon failures/issues with Lock/Special Test cases on disperse volume with GNFS mount
                      • #1410701: [SAMBA-SSL] Volume Share hungs when multiple mount & unmount is performed over a windows client on a SSL enabled cluster
                      • #1411228: remove-brick status shows 0 rebalanced files
                      • #1411334: Improve output of \"gluster volume status detail\"
                      • #1412135: rename of the same file from multiple clients with caching enabled may result in duplicate files
                      • #1412549: EXPECT_WITHIN is taking too much time even if the result matches with expected value
                      • #1413526: glusterfind: After glusterfind pre command execution all temporary files and directories /usr/var/lib/misc/glusterfsd/glusterfind/// should be removed
                      • #1413971: Bonnie test suite failed with \"Can't open file\" error
                      • #1414287: repeated operation failed warnings in gluster mount logs with disperse volume
                      • #1414346: Quota: After upgrade from 3.7 to higher version , gluster quota list command shows \"No quota configured on volume repvol\"
                      • #1414645: Typo in glusterfs code comments
                      • #1414782: Add logs to selfheal code path to be helpful for debug
                      • #1414902: packaging: python/python2(/python3) cleanup
                      • #1415115: client process crashed due to write behind translator
                      • #1415590: removing old tier commands under the rebalance commands
                      • #1415761: [Remove-brick] Hardlink migration fails with \"lookup failed (No such file or directory)\" error messages in rebalance logs
                      • #1416251: [SNAPSHOT] With all USS plugin enable .snaps directory is not visible in cifs mount as well as windows mount
                      • #1416520: Missing FOPs in the io-stats xlator
                      • #1416689: Fix spurious failure of ec-background-heal.t
                      • #1416889: Simplify refcount API for free'ing function
                      • #1417050: [Stress] : SHD Logs flooded with \"Heal Failed\" messages,filling up \"/\" quickly
                      • #1417466: Prevent reverse heal from happening
                      • #1417522: Automatic split brain resolution must check for all the bricks to be up to avoiding serving of inconsistent data(visible on x3 or more)
                      • #1417540: Mark tests/bitrot/bug-1373520.t bad
                      • #1417588: glusterd is setting replicate volume property over disperse volume or vice versa
                      • #1417913: Hangs on 32 bit systems since 3.9.0
                      • #1418014: disable client.io-threads on replica volume creation
                      • #1418095: Portmap allocates way too much memory (256KB) on stack
                      • #1418213: [Ganesha+SSL] : Bonnie++ hangs during rewrites.
                      • #1418249: [RFE] Need to have group cli option to set all md-cache options using a single command
                      • #1418259: Quota: After deleting directory from mount point on which quota was configured, quota list command output is blank
                      • #1418417: packaging: remove glusterfs-ganesha subpackage
                      • #1418629: glustershd process crashed on systemic setup
                      • #1418900: [RFE] Include few more options in virt file
                      • #1418973: removing warning related to enum, to let the build take place without errors for 3.10
                      • #1420166: The rebal-throttle setting does not work as expected
                      • #1420202: glusterd is crashed at the time of stop volume
                      • #1420434: Trash feature improperly disabled
                      • #1420571: Massive xlator_t leak in graph-switch code
                      • #1420611: when server-quorum is enabled, volume get returns 0 value for server-quorum-ratio
                      • #1420614: warning messages seen in glusterd logs while setting the volume option
                      • #1420619: Entry heal messages in glustershd.log while no entries shown in heal info
                      • #1420623: [RHV-RHGS]: Application VM paused after add brick operation and VM didn't comeup after power cycle.
                      • #1420637: Modified volume options not synced once offline nodes comes up.
                      • #1420697: CLI option \"--timeout\" is accepting non numeric and negative values.
                      • #1420713: glusterd: storhaug, remove all vestiges ganesha
                      • #1421023: Binary file gf_attach generated during build process should be git ignored
                      • #1421590: Bricks take up new ports upon volume restart after add-brick op with brick mux enabled
                      • #1421600: Test files clean up for tier during 3.10
                      • #1421607: Getting error messages in glusterd.log when peer detach is done
                      • #1421653: dht_setxattr returns EINVAL when a file is deleted during the FOP
                      • #1421721: volume start command hangs
                      • #1421724: glusterd log is flooded with stale disconnect rpc messages
                      • #1421759: Gluster NFS server crashing in __mnt3svc_umountall
                      • #1421937: [Replicate] \"RPC call decoding failed\" leading to IO hang & mount inaccessible
                      • #1421938: systemic testing: seeing lot of ping time outs which would lead to splitbrains
                      • #1421955: Disperse: Fallback to pre-compiled code execution when dynamic code generation fails
                      • #1422074: GlusterFS truncates nanoseconds to microseconds when setting mtime
                      • #1422152: Bricks not coming up when ran with address sanitizer
                      • #1422624: Need to improve remove-brick failure message when the brick process is down.
                      • #1422760: [Geo-rep] Recreating geo-rep session with same slave after deleting with reset-sync-time fails to sync
                      • #1422776: multiple glusterfsd process crashed making the complete subvolume unavailable
                      • #1423369: unnecessary logging in rda_opendir
                      • #1423373: Crash in index xlator because of race in inode_ctx_set and inode_ref
                      • #1423410: Mount of older client fails
                      • #1423413: Self-heal fail an WORMed-Files
                      • #1423448: glusterfs-fuse RPM now depends on gfapi
                      • #1424764: Coverty scan return false positive regarding crypto
                      • #1424791: Coverty scan detect a potential free on uninitialised pointer in error code path
                      • #1424793: Missing verification of fcntl return code
                      • #1424796: Remove deadcode found by coverty in glusterd-utils.c
                      • #1424802: Missing call to va_end in xlators/cluster/dht/src/dht-common.c
                      • #1424809: Fix another coverty error for useless goto
                      • #1424815: Fix erronous comparaison of flags resulting in UUID always sent
                      • #1424894: Some switches don't have breaks causing unintended fall throughs.
                      • #1424905: Coverity: Memory issues and dead code
                      • #1425288: glusterd is not validating for allowed values while setting \"cluster.brick-multiplex\" property
                      • #1425515: tests: quota-anon-fd-nfs.t needs to check if nfs mount is avialable before mounting
                      • #1425623: Free all xlator specific resources when xlator->fini() gets called
                      • #1425676: gfids are not populated in release/releasedir requests
                      • #1425703: [Disperse] Metadata version is not healing when a brick is down
                      • #1425743: Tier ./tests/bugs/glusterd/bug-1303028-Rebalance-glusterd-rpc-connection-issue.t
                      • #1426032: Log message shows error code as success even when rpc fails to connect
                      • #1426052: \u2018state\u2019 set but not used error when readline and/or ncurses is not installed
                      • #1426059: gluster fuse client losing connection to gluster volume frequently
                      • #1426125: Add logs to identify whether disconnects are voluntary or due to network problems
                      • #1426509: include volume name in rebalance stage error log
                      • #1426667: [GSS] NFS Sub-directory mount not working on solaris10 client
                      • #1426891: script to resolve function name and line number from backtrace
                      • #1426948: [RFE] capture portmap details in glusterd's statedump
                      • #1427012: Disconnects in nfs mount leads to IO hang and mount inaccessible
                      • #1427018: [RFE] - Need a way to reduce the logging of messages \"Peer CN\" and \"SSL verification suceeded messages\" in glusterd.log file
                      • #1427404: Move tests/bitrot/bug-1373520.t to bad tests and fix the underlying issue in posix
                      • #1428036: Update rfc.sh to check/request issue # when a commit is an \u201crfc\u201d
                      • #1428047: Require a Jenkins job to validate Change-ID on commits to branches in glusterfs repository
                      • #1428055: dht/rebalance: Increase maximum read block size from 128 KB to 1 MB
                      • #1428058: tests: Fix tests/bugs/distribute/bug-1161311.t
                      • #1428064: nfs: Check for null buf, and set op_errno to EIO not 0
                      • #1428068: nfs: Tear down transports for requests that arrive before the volume is initialized
                      • #1428073: nfs: Fix compiler warning when calling svc_getcaller
                      • #1428093: protocol/server: Fix crash bug in unlink flow
                      • #1428510: memory leak in features/locks xlator
                      • #1429198: Restore atime/mtime for symlinks and other non-regular files.
                      • #1429200: disallow increasing replica count for arbiter volumes
                      • #1429330: [crawler]: auxiliary mount remains even after crawler finishes
                      • #1429696: ldd libgfxdr.so.0.0.1: undefined symbol: __gf_free
                      • #1430042: Transport endpoint not connected error seen on client when glusterd is restarted
                      • #1430148: USS is broken when multiplexing is on
                      • #1430608: [RFE] Pass slave volume in geo-rep as read-only
                      • #1430719: gfid split brains not getting resolved with automatic splitbrain resolution
                      • #1430841: build/packaging: Debian and Ubuntu don't have /usr/libexec/; results in bad packages
                      • #1430860: brick process crashes when glusterd is restarted
                      • #1431183: [RFE] Gluster get state command should provide connected client related information
                      • #1431192: [RFE] Gluster get state command should provide volume and cluster utilization related information
                      • #1431908: Enabling parallel-readdir causes dht linkto files to be visible on the mount,
                      • #1431963: Warn CLI while creating replica 2 volumes
                      • #1432542: Glusterd crashes when restarted with many volumes
                      • #1433405: GF_REF_PUT() should return 0 when the structure becomes invalid
                      • #1433425: Unrecognized filesystems (i.e. btrfs, zfs) log many errors about \"getinode size\"
                      • #1433506: [Geo-rep] Master and slave mounts are not accessible to take client profile info
                      • #1433571: Undo pending xattrs only on the up bricks
                      • #1433578: glusterd crashes when peering an IP where the address is more than acceptable range (>255) OR with random hostnames
                      • #1433815: auth failure after upgrade to GlusterFS 3.10
                      • #1433838: Move spit-brain msg in read txn to debug
                      • #1434018: [geo-rep]: Worker crashes with [Errno 16] Device or resource busy: '.gfid/00000000-0000-0000-0000-000000000001/dir.166 while renaming directories
                      • #1434062: synclocks don't work correctly under contention
                      • #1434274: BZ for some bugs found while going through synctask code
                      • #1435943: When parallel readdir is enabled and there are simultaneous readdir and disconnects, then it results in crash
                      • #1436086: Parallel readdir on Gluster NFS displays less number of dentries
                      • #1436090: When parallel readdir is enabled, linked to file resolution fails
                      • #1436739: Sharding: Fix a performance bug
                      • #1436936: parameter state->size is wrong in server3_3_writev
                      • #1437037: Standardize atomic increment/decrement calling conventions
                      • #1437494: Brick Multiplexing:Volume status still shows the PID even after killing the process
                      • #1437748: Spacing issue in fix-layout status output
                      • #1437780: don't send lookup in fuse_getattr()
                      • #1437853: Spellcheck issues reported during Debian build
                      • #1438255: Don't wind post-op on a brick where the fop phase failed.
                      • #1438370: rebalance: Allow admin to change thread count for rebalance
                      • #1438411: [Ganesha + EC] : Input/Output Error while creating LOTS of smallfiles
                      • #1438738: Inode ref leak on anonymous reads and writes
                      • #1438772: build: clang/llvm has builtin_ffs() and builtin_popcount()
                      • #1438810: File-level WORM allows ftruncate() on read-only files
                      • #1438858: explicitly specify executor to be bash for tests
                      • #1439527: [disperse] Don't count healing brick as healthy brick
                      • #1439571: dht/rebalance: Improve rebalance crawl performance
                      • #1439640: [Parallel Readdir] : No bound-checks/CLI validation for parallel readdir tunables
                      • #1440051: Application VMs with their disk images on sharded-replica 3 volume are unable to boot after performing rebalance
                      • #1441035: remove bug-1421590-brick-mux-reuse-ports.t
                      • #1441106: [Geo-rep]: Unnecessary unlink call while processing rmdir
                      • #1441491: The data-self-heal option is not honored in EC
                      • #1441508: dht/cluster: rebalance/remove-brick should honor min-free-disk
                      • #1441910: gluster volume stop hangs
                      • #1441945: [Eventing]: Unrelated error message displayed when path specified during a 'webhook-test/add' is missing a schema
                      • #1442145: split-brain-favorite-child-policy.t depends on \"bc\"
                      • #1442411: meta xlator leaks memory when unloaded
                      • #1442569: Implement Negative lookup cache feature to improve create performance
                      • #1442724: rm -rf returns ENOTEMPTY even though ls on the mount point returns no files
                      • #1442760: snapshot: snapshots appear to be failing with respect to secure geo-rep slave
                      • #1443373: mkdir/rmdir loop causes gfid-mismatch on a 6 brick distribute volume
                      • #1443896: [BrickMultiplex] gluster command not responding and .snaps directory is not visible after executing snapshot related command
                      • #1443959: packaging: no firewalld-filesystem before el 7.3
                      • #1443977: Unable to take snapshot on a geo-replicated volume, even after stopping the session
                      • #1444023: io-stats xlator leaks memory when fini() is called
                      • #1444228: Autoconf leaves unexpanded variables in path names of non-shell-script text files
                      • #1444941: bogus date in %changelog
                      • #1445569: Provide a correct way to save the statedump generated by gfapi application
                      • #1445590: Incorrect and redundant logs in the DHT rmdir code path
                      • #1446126: S30samba-start.sh throws 'unary operator expected' warning during independent execution
                      • #1446273: Some functions are exported incorrectly for Mac OS X with the GFAPI_PUBLIC macro
                      • #1447543: Revert experimental and 4.0 features to prepare for 3.11 release
                      • #1447571: RFE: Enhance handleops readdirplus operation to return handles along with dirents
                      • #1447597: RFE : SELinux translator to support setting SELinux contexts on files in a glusterfs volume
                      • #1447604: volume set fails if nfs.so is not installed
                      • #1447607: Don't allow rebalance/fix-layout operation on sharding enabled volumes till dht+sharding bugs are fixed
                      • #1448345: Segmentation fault when creating a qcow2 with qemu-img
                      • #1448416: Halo Replication feature for AFR translator
                      • #1449004: [Brick Multiplexing] : Bricks for multiple volumes going down after glusterd restart and not coming back up after volume start force
                      • #1449191: Multiple bricks WILL crash after TCP port probing
                      • #1449311: [whql][virtio-block+glusterfs]\"Disk Stress\" and \"Disk Verification\" job always failed on win7-32/win2012/win2k8R2 guest
                      • #1449775: quota: limit-usage command failed with error \" Failed to start aux mount\"
                      • #1449921: afr: include quorum type and count when dumping afr priv
                      • #1449924: When either killing or restarting a brick with performance.stat-prefetch on, stat sometimes returns a bad st_size value.
                      • #1449933: Brick Multiplexing :- resetting a brick bring down other bricks with same PID
                      • #1450267: nl-cache xlator leaks timer wheel and other memory
                      • #1450377: GNFS crashed while taking lock on a file from 2 different clients having same volume mounted from 2 different servers
                      • #1450565: glfsheal: crashed(segfault) with disperse volume in RDMA
                      • #1450729: Brick Multiplexing: seeing Input/Output Error for .trashcan
                      • #1450933: [New] - Replacing an arbiter brick while I/O happens causes vm pause
                      • #1451033: contrib: timer-wheel 32-bit bug, use builtin_fls, license, etc
                      • #1451573: AFR returns the node uuid of the same node for every file in the replica
                      • #1451586: crash in dht_rmdir_do
                      • #1451591: cli xml status of detach tier broken
                      • #1451887: Add tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t to bad tests
                      • #1452000: Spacing issue in fix-layout status output
                      • #1453050: [DHt] : segfault in dht_selfheal_dir_setattr while running regressions
                      • #1453086: Brick Multiplexing: On reboot of a node Brick multiplexing feature lost on that node as multiple brick processes get spawned
                      • #1453152: [Parallel Readdir] : Mounts fail when performance.parallel-readdir is set to \"off\"
                      • #1454533: lock_revocation.t Marked as bad in 3.11 for CentOS as well
                      • #1454569: [geo-rep + nl]: Multiple crashes observed on slave with \"nlc_lookup_cbk\"
                      • #1454597: [Tiering]: High and low watermark values when set to the same level, is allowed
                      • #1454612: glusterd on a node crashed after running volume profile command
                      • #1454686: Implement FALLOCATE FOP for EC
                      • #1454853: Seeing error \"Failed to get the total number of files. Unable to estimate time to complete rebalance\" in rebalance logs
                      • #1455177: ignore incorrect uuid validation in gd_validate_mgmt_hndsk_req
                      • #1455423: dht: dht self heal fails with no hashed subvol error
                      • #1455907: heal info shows the status of the bricks as \"Transport endpoint is not connected\" though bricks are up
                      • #1456224: [gluster-block]:Need a volume group profile option for gluster-block volume to add necessary options to be added.
                      • #1456225: gluster-block is not working as expected when shard is enabled
                      • #1456331: [Bitrot]: Brick process crash observed while trying to recover a bad file in disperse volume
"},{"location":"release-notes/3.11.1/","title":"Release notes for Gluster 3.11.1","text":"

This is a bugfix release. The release notes for 3.11.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.11 stable release.

                        "},{"location":"release-notes/3.11.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":""},{"location":"release-notes/3.11.1/#improved-disperse-performance","title":"Improved disperse performance","text":"

The fix for bug #1456259 changes the way messages are read and processed from the socket layers on the Gluster client. This has shown performance improvements on disperse volumes, and is applicable to other volume types as well where there may be multiple applications or users accessing the same mount point.

                        "},{"location":"release-notes/3.11.1/#group-settings-for-enabling-negative-lookup-caching-provided","title":"Group settings for enabling negative lookup caching provided","text":"

The ability to serve negative lookups from cache was added in 3.11.0; with this release, a group volume set option is added to make enabling this feature easier.

                        See group-nl-cache for more details.
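Assuming the group profile is named nl-cache as referenced above, enabling the feature reduces to a single command:

 # gluster volume set <volname> group nl-cache\n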

                        "},{"location":"release-notes/3.11.1/#gluster-fuse-now-implements-oauto_unmount-feature","title":"Gluster fuse now implements \"-oauto_unmount\" feature.","text":"

                        libfuse has an auto_unmount option which, if enabled, ensures that the file system is unmounted at FUSE server termination by running a separate monitor process that performs the unmount when that occurs. This release implements that option and behavior for glusterfs.
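A sketch of using the option at mount time, assuming it is passed like any other FUSE mount option:

 # mount -t glusterfs -o auto_unmount <hostname>:/<volname> /<mount_point>\n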

                        Note that \"auto unmount\" (robust or not) is a leaky abstraction, as the kernel cannot guarantee that at the path where the FUSE fs is mounted is actually the toplevel mount at the time of the umount(2) call, for multiple reasons, among others, see:

                        • fuse-devel: \"fuse: feasible to distinguish between umount and abort?\"
                        • https://github.com/libfuse/libfuse/issues/122
                        "},{"location":"release-notes/3.11.1/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

• Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                          • Status of this bug can be tracked here, #1465123
                        "},{"location":"release-notes/3.11.1/#bugs-addressed","title":"Bugs addressed","text":"

                        Bugs addressed since release-3.11.0 are listed below.

                        • #1456259: limited throughput with disperse volume over small number of bricks
                        • #1457058: glusterfs client crash on io-cache.so(__ioc_page_wakeup+0x44)
                        • #1457289: tierd listens to a port.
                        • #1457339: DHT: slow readdirp performance
                        • #1457616: \"split-brain observed [Input/output error]\" error messages in samba logs during parallel rm -rf
                        • #1457901: nlc_lookup_cbk floods logs
                        • #1458570: [brick multiplexing] detach a brick if posix health check thread complaints about underlying brick
                        • #1458664: [Geo-rep]: METADATA errors are seen even though everything is in sync
                        • #1459090: all: spelling errors (debian package maintainer)
                        • #1459095: extras/hook-scripts: non-portable shell syntax (debian package maintainer)
                        • #1459392: possible repeatedly recursive healing of same file with background heal not happening when IO is going on
                        • #1459759: Glusterd segmentation fault in ' _Unwind_Backtrace' while running peer probe
                        • #1460647: posix-acl: Whitelist virtual ACL xattrs
                        • #1460894: Rebalance estimate time sometimes shows negative values
                        • #1460895: Upcall missing invalidations
                        • #1460896: [Negative Lookup Cache]Need a single group set command for enabling all required nl cache options
                        • #1460898: Enabling parallel-readdir causes dht linkto files to be visible on the mount,
                        • #1462121: [GNFS+EC] Unable to release the lock when the other client tries to acquire the lock on the same file
                        • #1462127: [Bitrot]: Inconsistency seen with 'scrub ondemand' - fails to trigger scrub
                        • #1462636: Use of force with volume start, creates brick directory even it is not present
                        • #1462661: lk fop succeeds even when lock is not acquired on at least quorum number of bricks
                        • #1463250: with AFR now making both nodes to return UUID for a file will result in georep consuming more resources
                        "},{"location":"release-notes/3.11.2/","title":"Release notes for Gluster 3.11.2","text":"

This is a bugfix release. The release notes for 3.11.0 and 3.11.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.11 stable release.

                        "},{"location":"release-notes/3.11.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                        There are no major features or changes made in this release.

                        "},{"location":"release-notes/3.11.2/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

• Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
• The last known cause for corruption (Bug #1465123) has a fix in this release. As further testing is still in progress, the issue is retained as a major issue.
                          • Status of this bug can be tracked here, #1465123
                        "},{"location":"release-notes/3.11.2/#bugs-addressed","title":"Bugs addressed","text":"

Bugs addressed since release-3.11.1 are listed below.

                        • #1463512: USS: stale snap entries are seen when activation/deactivation performed during one of the glusterd's unavailability
                        • #1463513: [geo-rep]: extended attributes are not synced if the entry and extended attributes are done within changelog roleover/or entry sync
                        • #1463517: Brick Multiplexing:dmesg shows request_sock_TCP: Possible SYN flooding on port 49152 and memory related backtraces
                        • #1463528: [Perf] 35% drop in small file creates on smbv3 on *2
                        • #1463626: [Ganesha]Bricks got crashed while running posix compliance test suit on V4 mount
                        • #1464316: DHT: Pass errno as an argument to gf_msg
                        • #1465123: Fd based fops fail with EBADF on file migration
                        • #1465854: Regression: Heal info takes longer time when a brick is down
                        • #1466801: assorted typos and spelling mistakes from Debian lintian
                        • #1466859: dht_rename_lock_cbk crashes in upstream regression test
                        • #1467268: Heal info shows incorrect status
                        • #1468118: disperse seek does not correctly handle the end of file
                        • #1468200: [Geo-rep]: entry failed to sync to slave with ENOENT errror
                        • #1468457: selfheal deamon cpu consumption not reducing when IOs are going on and all redundant bricks are brought down one after another
                        • #1469459: Rebalance hangs on remove-brick if the target volume changes
                        • #1470938: Regression: non-disruptive(in-service) upgrade on EC volume fails
                        • #1471025: glusterfs process leaking memory when error occurs
                        • #1471611: metadata heal not happening despite having an active sink
                        • #1471869: cthon04 can cause segfault in gNFS/NLM
                        • #1472794: Test script failing with brick multiplexing enabled
                        "},{"location":"release-notes/3.11.3/","title":"Release notes for Gluster 3.11.3","text":"

This is a bugfix release. The release notes for 3.11.2, 3.11.1, and 3.11.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.11 stable release.

This is possibly the last bugfix release for 3.11, as 3.12 is expected to be released around the end of August 2017, which will hence EOL the 3.11 release, since it is a short-term maintenance release (see release status).

                        "},{"location":"release-notes/3.11.3/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                        There are no major features or changes made in this release.

                        "},{"location":"release-notes/3.11.3/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

• Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e., add/remove bricks and rebalance), there are reports of VM images getting corrupted.
                          • The last known cause for corruption (Bug #1465123) has a fix with the 3.11.2 release. As further testing is still in progress, the issue is retained as a major issue.
                          • Status of this bug can be tracked here, #1465123
                        "},{"location":"release-notes/3.11.3/#bugs-addressed","title":"Bugs addressed","text":"

                        Bugs addressed since release-3.11.2 are listed below.

                        • #1475637: [Scale] : Client logs flooded with \"inode context is NULL\" error messages
                        • #1476822: scripts: invalid test in S32gluster_enable_shared_storage.sh
                        • #1476870: [EC]: md5sum mismatches every time for a file from the fuse client on EC volume
                        • #1476873: packaging: /var/lib/glusterd/options should be %config(noreplace)
                        • #1479656: Permission denied errors when appending files after readdir
                        • #1479692: Running sysbench on vm disk from plain distribute gluster volume causes disk corruption
                        "},{"location":"release-notes/3.12.0/","title":"Release notes for Gluster 3.12.0","text":"

This is a major Gluster release that includes: the ability to mount sub-directories using the Gluster native protocol (FUSE), further brick multiplexing enhancements that help scale to larger brick counts per node, enhancements to the gluster get-state CLI enabling a better understanding of the participation and roles of various bricks and nodes in the cluster, the ability to resolve GFID split-brain using the existing CLI, and easier GFID to real path mapping, enabling simpler diagnostics and correction for reported GFID issues (healing, among other uses where a GFID is the only available means of identifying a file), along with other changes and fixes.

                        The most notable features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

Further, as the 3.11 release is a short-term maintenance release, features included in that release are available in 3.12 as well, and may be of interest to users upgrading to 3.12 from releases older than 3.11. The 3.11 release notes capture the list of features that were introduced with 3.11.

                        "},{"location":"release-notes/3.12.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/3.12.0/#ability-to-mount-sub-directories-using-the-gluster-fuse-protocol","title":"Ability to mount sub-directories using the Gluster FUSE protocol","text":"

                        Notes for users:

With this release, it is possible to define sub-directories to be mounted by specific clients, providing additional granularity by allowing clients to mount only that portion of the volume for access.

Until now, Gluster FUSE mounts exposed the entire volume on the client. This feature helps share a volume among multiple consumers while restricting access to the sub-directory of choice.

The option controlling sub-directory allow/deny rules can be set as follows:

                        # gluster volume set <volname> auth.allow \"/subdir1(192.168.1.*),/(192.168.10.*),/subdir2(192.168.8.*)\"\n

                        How to mount from the client:

                        # mount -t glusterfs <hostname>:/<volname>/<subdir> /<mount_point>\n

                        Or,

                        # mount -t glusterfs <hostname>:/<volname> -osubdir_mount=<subdir> /<mount_point>\n

                        Limitations:

                        • There are no throttling or QoS support for this feature. The feature will just provide the namespace isolation for the different clients.

                        Known Issues:

• Once the auth.allow option holds on the order of thousands of subdirectories, reconnect/authentication performance will be impacted.
                        "},{"location":"release-notes/3.12.0/#gfid-to-path-conversion-is-enabled-by-default","title":"GFID to path conversion is enabled by default","text":"

                        Notes for users:

Prior to this feature, the on-disk data had pointers back from a GFID to its respective filename only when quota was enabled. As a result, if there was a need to locate the path for a given GFID, quota had to be enabled.

The change brought in by this feature is to make this on-disk data present in all cases, not just when quota is enabled. Further enhancements improve the manner in which this information is stored on disk as extended attributes.

The internal on-disk xattr that is now stored to reference the filename and parent for a GFID is trusted.gfid2path.<xxhash>

                        This feature is enabled by default with this release.
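For illustration, the new xattr can be inspected directly on a brick's backend file using getfattr; the brick path shown below is a hypothetical example:

 # getfattr -d -m trusted.gfid2path -e text /bricks/brick1/dir/file\n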

                        Limitations:

                        None

                        Known Issues:

                        None

                        "},{"location":"release-notes/3.12.0/#various-enhancements-have-been-made-to-the-output-of-get-state-cli-command","title":"Various enhancements have been made to the output of get-state CLI command","text":"

                        Notes for users:

                         The command # gluster get-state has been enhanced to output more information, as listed below; an example invocation follows the list.

                        • Arbiter bricks are marked more clearly in a volume that has the feature enabled
                        • Ability to get all volume options (both set and defaults) in the get-state output
                         • Rebalance time estimates for an ongoing rebalance are captured in the get-state output
                         • If geo-replication is configured, get-state now captures the session details as well
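                         A minimal sketch of invoking the command; the output directory and file name below are illustrative choices, not defaults:

                         # gluster get-state
                         # gluster get-state glusterd odir /var/run/gluster/ file glusterd_state_example

                         The first form writes the state dump to the default location (typically under /var/run/gluster/), while the second directs it to the given directory and file name.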

                        Limitations:

                        None

                        Known Issues:

                        None

                        "},{"location":"release-notes/3.12.0/#provided-an-option-to-set-a-limit-on-number-of-bricks-multiplexed-in-a-processes","title":"Provided an option to set a limit on number of bricks multiplexed in a processes","text":"

                        Notes for users:

                        This release includes a global option to be switched on only if brick multiplexing is enabled for the cluster. The introduction of this option allows the user to control the number of bricks that are multiplexed in a process on a node. If the limit set by this option is insufficient for a single process, more processes are spawned for the subsequent bricks.

                        Usage:

                         # gluster volume set all cluster.max-bricks-per-process <value>
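                         For example, the following sketch caps each brick process at 10 bricks (the value is illustrative) and then counts the glusterfsd processes running on a node:

                         # gluster volume set all cluster.max-bricks-per-process 10
                         # pgrep -c glusterfsd

                         Bricks started after the limit is set are placed into additional processes once an existing process reaches the configured count.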
                        "},{"location":"release-notes/3.12.0/#provided-an-option-to-use-localtime-timestamps-in-log-entries","title":"Provided an option to use localtime timestamps in log entries","text":"

                        Limitations:

                         Gluster defaults to UTC timestamps. glusterd, glusterfsd, and server-side glusterfs daemons will use UTC until one of the following happens:

                         1. a command line option is processed,
                         2. the gluster config (/var/lib/glusterd/options) is loaded,
                         3. the admin manually sets localtime-logging (cluster.localtime-logging, e.g. # gluster volume set all cluster.localtime-logging enable).

                        There is no mount option to make the FUSE client enable localtime logging.

                        There is no option in gfapi to enable localtime logging.
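                         For example, the cluster-wide option mentioned above can be toggled as follows, assuming the usual enable/disable values for boolean options; FUSE clients and gfapi consumers continue to log in UTC either way:

                         # gluster volume set all cluster.localtime-logging enable
                         # gluster volume set all cluster.localtime-logging disable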

                        "},{"location":"release-notes/3.12.0/#enhanced-the-option-to-export-statfs-data-for-bricks-sharing-the-same-backend-filesystem","title":"Enhanced the option to export statfs data for bricks sharing the same backend filesystem","text":"

                         Notes for users: In the past the 'storage/posix' xlator had an option called export-statfs-size, which, when set to 'no', exports zero as the value for a few fields in struct statvfs. From a user perspective, these are typically reflected in the output of the df command.

                         When backend bricks are shared between multiple brick processes, the values of these fields are now corrected to reflect field_value / number-of-bricks-at-node. This enables better usage reporting and also improves file placement by the distribute translator when used with the option min-free-disk.
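                         As a rough illustration of the effect: if two bricks of the same volume share one backend filesystem on a node, each brick now reports field_value / 2, so aggregated client-side reporting no longer double-counts the shared space. The mount point below is a placeholder:

                         # df -h /<mount_point>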

                        "},{"location":"release-notes/3.12.0/#provided-a-means-to-resolve-gfid-split-brain-using-the-gluster-cli","title":"Provided a means to resolve GFID split-brain using the gluster CLI","text":"

                        Notes for users:

                        The existing CLI commands to heal files under split-brain did not handle cases where there was a GFID mismatch between the files. With the provided enhancement the same CLI commands can now address GFID split-brain situations based on the choices provided.

                        The CLI options that are enhanced to help with this situation are,

                         volume heal <VOLNAME> split-brain {bigger-file <FILE> |
                             latest-mtime <FILE> |
                             source-brick <HOSTNAME:BRICKNAME> [<FILE>]}
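                         For instance, using the documented forms above (the volume name, file path, and brick are placeholders; <FILE> is the path as seen from the root of the volume, and a GFID string is also accepted):

                         # gluster volume heal <VOLNAME> split-brain bigger-file /dir/file
                         # gluster volume heal <VOLNAME> split-brain latest-mtime /dir/file
                         # gluster volume heal <VOLNAME> split-brain source-brick <HOSTNAME>:<BRICKNAME> /dir/file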

                        Limitations:

                        None

                        Known Issues:

                        None

                        "},{"location":"release-notes/3.12.0/#developer-related-added-a-siteh-for-more-vendorcompany-specific-defaults","title":"Developer related: Added a 'site.h' for more vendor/company specific defaults","text":"

                        Notes for developers:

                        NOTE: Also relevant for users building from sources and needing different defaults for some options

                        Most people consume Gluster in one of two ways:

                        • From packages provided by their OS/distribution vendor
                        • By building themselves from source

                        For the first group it doesn't matter whether configuration is done in a configure script, via command-line options to that configure script, or in a header file. All of these end up as edits to some file under the packager's control, which is then run through their tools and process (e.g. rpmbuild) to create the packages that users will install.

                        For the second group, convenience matters. Such users might not even have a script wrapped around the configure process, and editing one line in a header file is a lot easier than editing several in the configure script. This also prevents a messy profusion of configure options, dozens of which might need to be added to support a single such user's preferences. This comes back around as greater simplicity for packagers as well. This patch defines site.h as the header file for options and parameters that someone building the code for themselves might want to tweak.

                        The project ships one version to reflect the developers' guess at the best defaults for most users, and sophisticated users with unusual needs can override many options at once just by maintaining their own version of that file. Further guidelines for how to determine whether an option should go in configure.ac or site.h are explained within site.h itself.

                        "},{"location":"release-notes/3.12.0/#developer-related-added-xxhash-library-to-libglusterfs-for-required-use","title":"Developer related: Added xxhash library to libglusterfs for required use","text":"

                        Notes for developers:

                         The function gf_xxh64_wrapper has been added to libglusterfs as a wrapper around xxhash, for consumption by interested developers.

                        Reference to code can be found here

                        "},{"location":"release-notes/3.12.0/#developer-related-glfs_ipc-api-in-libgfapi-is-removed-as-a-public-interface","title":"Developer related: glfs_ipc API in libgfapi is removed as a public interface","text":"

                        Notes for users:

                        glfs_ipc API was maintained as a public API in the GFAPI libraries. This has been removed as a public interface, from this release onwards.

                         Any application written directly against gfapi as a means of interfacing with Gluster and using this API will need to be modified to adapt to this change.

                        NOTE: As of this release there are no known public consumers of this API

                        "},{"location":"release-notes/3.12.0/#major-issues","title":"Major issues","text":"
                         1. Expanding a gluster volume that is sharded may cause file corruption

                           • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
                           • The last known cause for corruption (Bug #1465123) has a fix with this release. As further testing is still in progress, the issue is retained as a major issue.
                           • Status of this bug can be tracked here, #1465123
                        "},{"location":"release-notes/3.12.0/#bugs-addressed","title":"Bugs addressed","text":"

                        Bugs addressed since release-3.11.0 are listed below.

                        • #1047975: glusterfs/extras: add a convenience script to label (selinux) gluster bricks
                        • #1254002: [RFE] Have named pthreads for easier debugging
                        • #1318100: RFE : SELinux translator to support setting SELinux contexts on files in a glusterfs volume
                        • #1318895: Heal info shows incorrect status
                        • #1326219: Make Gluster/NFS an optional component
                        • #1356453: DHT: slow readdirp performance
                        • #1366817: AFR returns the node uuid of the same node for every file in the replica
                        • #1381970: GlusterFS Daemon stops working after a longer runtime and higher file workload due to design flaws?
                        • #1400924: [RFE] Rsync flags for performance improvements
                        • #1402406: Client stale file handle error in dht-linkfile.c under SPEC SFS 2014 VDA workload
                         • #1414242: [whql][virtio-block+glusterfs]"Disk Stress" and "Disk Verification" job always failed on win7-32/win2012/win2k8R2 guest
                        • #1421938: systemic testing: seeing lot of ping time outs which would lead to splitbrains
                        • #1424817: Fix wrong operators, found by coverty
                        • #1428061: Halo Replication feature for AFR translator
                        • #1428673: possible repeatedly recursive healing of same file with background heal not happening when IO is going on
                        • #1430608: [RFE] Pass slave volume in geo-rep as read-only
                        • #1431908: Enabling parallel-readdir causes dht linkto files to be visible on the mount,
                         • #1433906: quota: limit-usage command failed with error " Failed to start aux mount"
                        • #1437748: Spacing issue in fix-layout status output
                        • #1438966: Multiple bricks WILL crash after TCP port probing
                        • #1439068: Segmentation fault when creating a qcow2 with qemu-img
                        • #1442569: Implement Negative lookup cache feature to improve create performance
                        • #1442788: Cleanup timer wheel in glfs_fini()
                        • #1442950: RFE: Enhance handleops readdirplus operation to return handles along with dirents
                        • #1444596: [Brick Multiplexing] : Bricks for multiple volumes going down after glusterd restart and not coming back up after volume start force
                        • #1445609: [perf-xlators/write-behind] write-behind-window-size could be set greater than its allowed MAX value 1073741824
                        • #1446172: Brick Multiplexing :- resetting a brick bring down other bricks with same PID
                        • #1446362: cli xml status of detach tier broken
                        • #1446412: error-gen don't need to convert error string to int in every fop
                         • #1446516: [Parallel Readdir] : Mounts fail when performance.parallel-readdir is set to "off"
                        • #1447116: gfapi exports non-existing glfs_upcall_inode_get_event symbol
                        • #1447266: [snapshot cifs]ls on .snaps directory is throwing input/output error over cifs mount
                        • #1447389: Brick Multiplexing: seeing Input/Output Error for .trashcan
                        • #1447609: server: fd should be refed before put into fdtable
                        • #1447630: Don't allow rebalance/fix-layout operation on sharding enabled volumes till dht+sharding bugs are fixed
                        • #1447826: potential endless loop in function glusterfs_graph_validate_options
                        • #1447828: Should use dict_set_uint64 to set fd->pid when dump fd's info to dict
                        • #1447953: Remove inadvertently merged IPv6 code
                        • #1447960: [Tiering]: High and low watermark values when set to the same level, is allowed
                        • #1447966: 'make cscope' fails on a clean tree due to missing generated XDR files
                        • #1448150: USS: stale snap entries are seen when activation/deactivation performed during one of the glusterd's unavailability
                        • #1448265: use common function iov_length to instead of duplicate code
                        • #1448293: Implement FALLOCATE FOP for EC
                        • #1448299: Mismatch in checksum of the image file after copying to a new image file
                        • #1448364: limited throughput with disperse volume over small number of bricks
                         • #1448640: Seeing error "Failed to get the total number of files. Unable to estimate time to complete rebalance" in rebalance logs
                        • #1448692: use GF_ATOMIC to generate callid
                        • #1448804: afr: include quorum type and count when dumping afr priv
                        • #1448914: [geo-rep]: extended attributes are not synced if the entry and extended attributes are done within changelog roleover/or entry sync
                        • #1449008: remove useless options from glusterd's volume set table
                        • #1449232: race condition between client_ctx_get and client_ctx_set
                        • #1449329: When either killing or restarting a brick with performance.stat-prefetch on, stat sometimes returns a bad st_size value.
                        • #1449348: disperse seek does not correctly handle the end of file
                        • #1449495: glfsheal: crashed(segfault) with disperse volume in RDMA
                        • #1449610: [New] - Replacing an arbiter brick while I/O happens causes vm pause
                        • #1450010: [gluster-block]:Need a volume group profile option for gluster-block volume to add necessary options to be added.
                        • #1450559: Error 0-socket.management: socket_poller XX.XX.XX.XX:YYY failed (Input/output error) during any volume operation
                        • #1450630: [brick multiplexing] detach a brick if posix health check thread complaints about underlying brick
                        • #1450730: Add tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t to bad tests
                        • #1450975: Fix on demand file migration from client
                        • #1451083: crash in dht_rmdir_do
                        • #1451162: dht: Make throttle option \"normal\" value uniform across dht_init and dht_reconfigure
                        • #1451248: Brick Multiplexing: On reboot of a node Brick multiplexing feature lost on that node as multiple brick processes get spawned
                        • #1451588: [geo-rep + nl]: Multiple crashes observed on slave with \"nlc_lookup_cbk\"
                        • #1451724: glusterfind pre crashes with \"UnicodeDecodeError: 'utf8' codec can't decode\" error when the --no-encode is used
                        • #1452006: tierd listens to a port.
                        • #1452084: [Ganesha] : Stale linkto files after unsuccessfuly hardlinks
                        • #1452102: [DHt] : segfault in dht_selfheal_dir_setattr while running regressions
                        • #1452378: Cleanup unnecessary logs in fix_quorum_options
                        • #1452527: Shared volume doesn't get mounted on few nodes after rebooting all nodes in cluster.
                        • #1452956: glusterd on a node crashed after running volume profile command
                        • #1453151: [RFE] glusterfind: add --end-time and --field-separator options
                        • #1453977: Brick Multiplexing: Deleting brick directories of the base volume must gracefully detach from glusterfsd without impacting other volumes IO(currently seeing transport end point error)
                        • #1454317: [Bitrot]: Brick process crash observed while trying to recover a bad file in disperse volume
                        • #1454375: ignore incorrect uuid validation in gd_validate_mgmt_hndsk_req
                        • #1454418: Glusterd segmentation fault in ' _Unwind_Backtrace' while running peer probe
                        • #1454701: DHT: Pass errno as an argument to gf_msg
                         • #1454865: [Brick Multiplexing] heal info shows the status of the bricks as "Transport endpoint is not connected" though bricks are up
                        • #1454872: [Geo-rep]: Make changelog batch size configurable
                        • #1455049: [GNFS+EC] Unable to release the lock when the other client tries to acquire the lock on the same file
                        • #1455104: dht: dht self heal fails with no hashed subvol error
                        • #1455179: [Geo-rep]: Log time taken to sync entry ops, metadata ops and data ops for each batch
                        • #1455301: gluster-block is not working as expected when shard is enabled
                        • #1455559: [Geo-rep]: METADATA errors are seen even though everything is in sync
                        • #1455831: libglusterfs: updates old comment for 'arena_size'
                        • #1456361: DHT : for many operation directory/file path is '(null)' in brick log
                        • #1456385: glusterfs client crash on io-cache.so(__ioc_page_wakeup+0x44)
                        • #1456405: Brick Multiplexing:dmesg shows request_sock_TCP: Possible SYN flooding on port 49152 and memory related backtraces
                         • #1456582: "split-brain observed [Input/output error]" error messages in samba logs during parallel rm -rf
                        • #1456653: nlc_lookup_cbk floods logs
                        • #1456898: Regression test for add-brick failing with brick multiplexing enabled
                        • #1457202: Use of force with volume start, creates brick directory even it is not present
                        • #1457808: all: spelling errors (debian package maintainer)
                        • #1457812: extras/hook-scripts: non-portable shell syntax (debian package maintainer)
                        • #1457981: client fails to connect to the brick due to an incorrect port reported back by glusterd
                        • #1457985: Rebalance estimate time sometimes shows negative values
                        • #1458127: Upcall missing invalidations
                        • #1458193: Implement seek() fop in trace translator
                        • #1458197: io-stats usability/performance statistics enhancements
                        • #1458539: [Negative Lookup]: negative lookup features doesn't seem to work on restart of volume
                        • #1458582: add all as volume option in gluster volume get usage
                        • #1458768: [Perf] 35% drop in small file creates on smbv3 on *2
                        • #1459402: brick process crashes while running bug-1432542-mpx-restart-crash.t in a loop
                        • #1459530: [RFE] Need a way to resolve gfid split brains
                        • #1459620: [geo-rep]: Worker crashed with TypeError: expected string or buffer
                        • #1459781: Brick Multiplexing:Even clean Deleting of the brick directories of base volume is resulting in posix health check errors(just as we see in ungraceful delete methods)
                        • #1459971: posix-acl: Whitelist virtual ACL xattrs
                         • #1460225: Not cleaning up stale socket file is resulting in spamming glusterd logs with warnings of "got disconnect from stale rpc"
                        • #1460514: [Ganesha] : Ganesha crashes while cluster enters failover/failback mode
                        • #1460585: Revert CLI restrictions on running rebalance in VM store use case
                        • #1460638: ec-data-heal.t fails with brick mux enabled
                        • #1460659: Avoid one extra call of l(get|list)xattr system call after use buffer in posix_getxattr
                        • #1461129: malformed cluster.server-quorum-ratio setting can lead to split brain
                        • #1461648: Update GlusterFS README
                        • #1461655: glusterd crashes when statedump is taken
                        • #1461792: lk fop succeeds even when lock is not acquired on at least quorum number of bricks
                        • #1461845: [Bitrot]: Inconsistency seen with 'scrub ondemand' - fails to trigger scrub
                        • #1462200: glusterd status showing failed when it's stopped in RHEL7
                        • #1462241: glusterfind: syntax error due to uninitialized variable 'end'
                        • #1462790: with AFR now making both nodes to return UUID for a file will result in georep consuming more resources
                        • #1463178: [Ganesha]Bricks got crashed while running posix compliance test suit on V4 mount
                        • #1463365: Changes for Maintainers 2.0
                        • #1463648: Use GF_XATTR_LIST_NODE_UUIDS_KEY to figure out local subvols
                        • #1464072: cns-brick-multiplexing: brick process fails to restart after gluster pod failure
                        • #1464091: Regression: Heal info takes longer time when a brick is down
                        • #1464110: [Scale] : Rebalance ETA (towards the end) may be inaccurate,even on a moderately large data set.
                        • #1464327: glusterfs client crashes when reading large directory
                        • #1464359: selfheal deamon cpu consumption not reducing when IOs are going on and all redundant bricks are brought down one after another
                        • #1465024: glusterfind: DELETE path needs to be unquoted before further processing
                        • #1465075: Fd based fops fail with EBADF on file migration
                        • #1465214: build failed with GF_DISABLE_MEMPOOL
                        • #1465559: multiple brick processes seen on gluster(fs)d restart in brick multiplexing
                        • #1466037: Fuse mount crashed with continuous dd on a file and reading the file in parallel
                        • #1466110: dht_rename_lock_cbk crashes in upstream regression test
                        • #1466188: Add scripts to analyze quota xattr in backend and identify accounting issues
                        • #1466785: assorted typos and spelling mistakes from Debian lintian
                        • #1467209: [Scale] : Rebalance ETA shows the initial estimate to be ~140 days,finishes within 18 hours though.
                        • #1467277: [GSS] [RFE] add documentation on --xml and --mode=script options to gluster interactive help and man pages
                        • #1467313: cthon04 can cause segfault in gNFS/NLM
                        • #1467513: CIFS:[USS]: .snaps is not accessible from the CIFS client after volume stop/start
                        • #1467718: [Geo-rep]: entry failed to sync to slave with ENOENT errror
                        • #1467841: gluster volume status --xml fails when there are 100 volumes
                        • #1467986: possible memory leak in glusterfsd with multiplexing
                        • #1468191: Enable stat-prefetch in group virt
                        • #1468261: Regression: non-disruptive(in-service) upgrade on EC volume fails
                        • #1468279: metadata heal not happening despite having an active sink
                        • #1468291: NFS Sub directory is getting mounted on solaris 10 even when the permission is restricted in nfs.export-dir volume option
                        • #1468432: tests: fix stats-dump.t failure
                        • #1468433: rpc: include current second in timed out frame cleanup on client
                        • #1468863: Assert in mem_pools_fini during libgfapi-fini-hang.t on NetBSD
                        • #1469029: Rebalance hangs on remove-brick if the target volume changes
                        • #1469179: invoke checkpatch.pl with strict
                        • #1469964: cluster/dht: Fix hardlink migration failures
                        • #1470170: mem-pool: mem_pool_fini() doesn't release entire memory allocated
                        • #1470220: glusterfs process leaking memory when error occurs
                        • #1470489: bulk removexattr shouldn't allow removal of trusted.gfid/trusted.glusterfs.volume-id
                        • #1470533: Brick Mux Setup: brick processes(glusterfsd) crash after a restart of volume which was preceded with some actions
                        • #1470768: file /usr/lib64/glusterfs/3.12dev/xlator is not owned by any package
                        • #1471790: [Brick Multiplexing] : cluster.brick-multiplex has no description.
                        • #1472094: Test script failing with brick multiplexing enabled
                        • #1472250: Remove fop_enum_to_string, get_fop_int usage in libglusterfs
                        • #1472417: No clear method to multiplex all bricks to one process(glusterfsd) with cluster.max-bricks-per-process option
                        • #1472949: [distribute] crashes seen upon rmdirs
                        • #1475181: dht remove-brick status does not indicate failures files not migrated because of a lack of space
                        • #1475192: [Scale] : Rebalance ETA shows the initial estimate to be ~140 days,finishes within 18 hours though.
                        • #1475258: [Geo-rep]: Geo-rep hangs in changelog mode
                        • #1475399: Rebalance estimate time sometimes shows negative values
                         • #1475635: [Scale] : Client logs flooded with "inode context is NULL" error messages
                        • #1475641: gluster core dump due to assert failed GF_ASSERT (brick_index < wordcount);
                        • #1475662: [Scale] : Rebalance Logs are bulky.
                        • #1476109: Brick Multiplexing: Brick process crashed at changetimerecorder(ctr) translator when restarting volumes
                        • #1476208: [geo-rep]: few of the self healed hardlinks on master did not sync to slave
                        • #1476653: cassandra fails on gluster-block with both replicate and ec volumes
                        • #1476654: gluster-block default shard-size should be 64MB
                        • #1476819: scripts: invalid test in S32gluster_enable_shared_storage.sh
                        • #1476863: packaging: /var/lib/glusterd/options should be %config(noreplace)
                        • #1476868: [EC]: md5sum mismatches every time for a file from the fuse client on EC volume
                        • #1477152: [Remove-brick] Few files are getting migrated eventhough the bricks crossed cluster.min-free-disk value
                        • #1477190: [GNFS] GNFS got crashed while mounting volume on solaris client
                        • #1477381: Revert experimental and 4.0 features to prepare for 3.12 release
                        • #1477405: eager-lock should be off for cassandra to work at the moment
                        • #1477994: [Ganesha] : Ganesha crashes while cluster enters failover/failback mode
                        • #1478276: separating attach tier and add brick
                        • #1479118: AFR entry self heal removes a directory's .glusterfs symlink.
                         • #1479263: nfs process crashed in "nfs3svc_getattr"
                        • #1479303: [Perf] : Large file sequential reads are off target by ~38% on FUSE/Ganesha
                        • #1479474: Add NULL gfid checks before creating file
                        • #1479655: Permission denied errors when appending files after readdir
                        • #1479662: when gluster pod is restarted, bricks from the restarted pod fails to connect to fuse, self-heal etc
                        • #1479717: Running sysbench on vm disk from plain distribute gluster volume causes disk corruption
                        • #1480448: More useful error - replace 'not optimal'
                        • #1480459: Gluster puts PID files in wrong place
                        • #1481931: [Scale] : I/O errors on multiple gNFS mounts with \"Stale file handle\" during rebalance of an erasure coded volume.
                        • #1482804: Negative Test: glusterd crashes for some of the volume options if set at cluster level
                        • #1482835: glusterd fails to start
                        • #1483402: DHT: readdirp fails to read some directories.
                        • #1483996: packaging: use rdma-core(-devel) instead of ibverbs, rdmacm; disable rdma on armv7hl
                        • #1484440: packaging: /run and /var/run; prefer /run
                        • #1484885: [rpc]: EPOLLERR - disconnecting now messages every 3 secs after completing rebalance
                        • #1486107: /var/lib/glusterd/peers File had a blank line, Stopped Glusterd from starting
                        • #1486110: [quorum]: Replace brick is happened when Quorum not met.
                        • #1486120: symlinks trigger faulty geo-replication state (rsnapshot usecase)
                        • #1486122: gluster-block profile needs to have strict-o-direct
                        "},{"location":"release-notes/3.12.1/","title":"Release notes for Gluster 3.12.1","text":"

                        This is a bugfix release. The Release Notes for 3.12.0, 3.12.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                          No major changes
                        "},{"location":"release-notes/3.12.1/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

                           • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
                          • The last known cause for corruption (Bug #1465123) has a fix with this release. As further testing is still in progress, the issue is retained as a major issue.
                          • Status of this bug can be tracked here, #1465123
                        "},{"location":"release-notes/3.12.1/#bugs-addressed","title":"Bugs addressed","text":"
                          A total of 12 patches have been merged, addressing 11 bugs
                        • #1486538: [geo-rep+qr]: Crashes observed at slave from qr_lookup_sbk during rename/hardlink/rebalance
                        • #1486557: Log entry of files skipped/failed during rebalance operation
                        • #1487033: rpc: client_t and related objects leaked due to incorrect ref counts
                        • #1487319: afr: check op_ret value in __afr_selfheal_name_impunge
                        • #1488119: scripts: mount.glusterfs contains non-portable bashisms
                        • #1488168: Launch metadata heal in discover code path.
                        • #1488387: gluster-blockd process crashed and core generated
                        • #1488718: [RHHI] cannot boot vms created from template when disk format = qcow2
                        • #1489260: Crash in dht_check_and_open_fd_on_subvol_task()
                        • #1489296: glusterfsd (brick) process crashed
                        • #1489511: return ENOSYS for 'non readable' FOPs
                        "},{"location":"release-notes/3.12.10/","title":"Release notes for Gluster 3.12.10","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8 and 3.12.9 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.10/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                        None

                        "},{"location":"release-notes/3.12.10/#major-issues","title":"Major issues","text":"

                        None

                        "},{"location":"release-notes/3.12.10/#bugs-addressed","title":"Bugs addressed","text":"

                         Bugs addressed since release-3.12.9 are listed below.

                        • #1570475: Rebalance on few nodes doesn't seem to complete - stuck at FUTEX_WAIT
                        • #1576816: GlusterFS can be improved
                        • #1577164: gfapi: broken symbol versions
                        • #1577845: Geo-rep: faulty session due to OSError: [Errno 95] Operation not supported
                        • #1577862: [geo-rep]: Upgrade fails, session in FAULTY state
                        • #1577868: Glusterd crashed on a few (master) nodes
                        • #1577871: [geo-rep]: Geo-rep scheduler fails
                         • #1580519: the regression test "tests/bugs/posix/bug-990028.t" fails
                        • #1581746: bug-1309462.t is failing reliably due to changes in security.capability changes in the kernel
                        • #1590133: xdata is leaking in server3_3_seek
                        "},{"location":"release-notes/3.12.11/","title":"Release notes for Gluster 3.12.11","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, and 3.12.10 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.11/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                         This release contains a fix for a security vulnerability in Gluster, as follows:

                        • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10841
                        • https://nvd.nist.gov/vuln/detail/CVE-2018-10841

                         Installing the updated packages and restarting gluster services on gluster brick hosts will help prevent the security issue.

                        "},{"location":"release-notes/3.12.11/#major-issues","title":"Major issues","text":"

                        None

                        "},{"location":"release-notes/3.12.11/#bugs-addressed","title":"Bugs addressed","text":"

                        Bugs addressed since release-3.12.10 are listed below.

                        • #1559829: snap/gcron.py: ABRT report for package glusterfs has reached 100 occurrences
                        • #1591187: Gluster Block PVC fails to mount on Jenkins pod
                        • #1593526: CVE-2018-10841 glusterfs: access trusted peer group via remote-host command [glusterfs upstream]
                        "},{"location":"release-notes/3.12.12/","title":"Release notes for Gluster 3.12.12","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, 3.12.10 and 3.12.11 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.12/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                        None

                        "},{"location":"release-notes/3.12.12/#major-issues","title":"Major issues","text":"

                        None

                        "},{"location":"release-notes/3.12.12/#bugs-addressed","title":"Bugs addressed","text":"

                         Bugs addressed since release-3.12.11 are listed below.

                        • #1579673: Remove EIO from the dht_inode_missing macro
                        • #1595528: rmdir is leaking softlinks to directories in .glusterfs
                        • #1597120: Add quorum checks in post-op
                        • #1597123: Changes to self-heal logic w.r.t. detecting of split-brains
                        • #1597154: When storage reserve limit is reached, appending data to an existing file throws EROFS error
                        • #1597230: glustershd crashes when index heal is launched before graph is initialized.
                        • #1598121: lookup not assigning gfid if file is not present in all bricks of replica
                        • #1598720: afr: fix bug-1363721.t failure
                        • #1599247: afr: don't update readables if inode refresh failed on all children
                        "},{"location":"release-notes/3.12.13/","title":"Release notes for Gluster 3.12.13","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, 3.12.10, 3.12.11 and 3.12.12 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.13/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                        None

                        "},{"location":"release-notes/3.12.13/#major-issues","title":"Major issues","text":"

                        None

                        "},{"location":"release-notes/3.12.13/#bugs-addressed","title":"Bugs addressed","text":"

                         Bugs addressed in release-3.12.13 are listed below.

                        • #1599788: _is_prefix should return false for 0-length strings
                        • #1603093: directories are invisible on client side
                        • #1613512: Backport glusterfs-client memory leak fix to 3.12.x
                        • #1618838: gluster bash completion leaks TOP=0 into the environment
                        • #1618348: [Ganesha] Ganesha crashed in mdcache_alloc_and_check_handle while running bonnie and untars with parallel lookups
                        "},{"location":"release-notes/3.12.14/","title":"Release notes for Gluster 3.12.14","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, 3.12.10, 3.12.11, 3.12.12 and 3.12.13 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.14/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                         1. This release contains fixes for the following security vulnerabilities:

                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10904
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10907
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10911
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10913
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10914
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10923
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10926
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10927
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10928
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10929
                           • https://nvd.nist.gov/vuln/detail/CVE-2018-10930

                         2. To resolve the security vulnerabilities, the following limitations were made in GlusterFS:

                           • open, read, and write on special files such as character and block devices are no longer permitted
                           • the io-stats xlator can dump stats info only to the /var/run/gluster directory

                         3. Addressed an issue that affected copying a file over SSL/TLS in a volume

                         Installing the updated packages and restarting gluster services on gluster brick hosts will fix the security issues.

                        "},{"location":"release-notes/3.12.14/#major-issues","title":"Major issues","text":"

                        None

                        "},{"location":"release-notes/3.12.14/#bugs-addressed","title":"Bugs addressed","text":"

                         Bugs addressed since release-3.12.13 are listed below.

                        • #1622405: Problem with SSL/TLS encryption on Gluster 4.0 & 4.1
                        • #1625286: Information Exposure in posix_get_file_contents function in posix-helpers.c
                        • #1625648: I/O to arbitrary devices on storage server
                        • #1625654: Stack-based buffer overflow in server-rpc-fops.c allows remote attackers to execute arbitrary code
                        • #1625656: Improper deserialization in dict.c:dict_unserialize() can allow attackers to read arbitrary memory
                        • #1625660: Unsanitized file names in debug/io-stats translator can allow remote attackers to execute arbitrary code
                        • #1625664: Files can be renamed outside volume
                        "},{"location":"release-notes/3.12.15/","title":"Release notes for Gluster 3.12.15","text":"

                         This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, 3.12.8, 3.12.9, 3.12.10, 3.12.11, 3.12.12, 3.12.13 and 3.12.14 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.15/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                        None

                        "},{"location":"release-notes/3.12.15/#major-issues","title":"Major issues","text":"

                        None

                        "},{"location":"release-notes/3.12.15/#bugs-addressed","title":"Bugs addressed","text":"

                         Bugs addressed since release-3.12.14 are listed below.

                        • #1569336: Volume status inode is broken with brickmux
                        • #1625588: Prevent hangs while increasing replica-count/replace-brick for directory hierarchy
                        • #1497989: Gluster 3.12.1 Packages require manual systemctl daemon reload after install
                        • #1512371: parallel-readdir = TRUE prevents directories listing
                        • #1633625: split-brain observed on parent dir
                        • #1637989: data-self-heal in arbiter volume results in stale locks.
                        "},{"location":"release-notes/3.12.2/","title":"Release notes for Gluster 3.12.2","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                         1. In a pure distribute volume there is no source to heal the replaced brick from, and hence a replace-brick operation would cause a loss of the data that was present in the replaced brick. The CLI has been enhanced to prevent a user from inadvertently using replace-brick on a pure distribute volume. It is advised to use add/remove brick to migrate data from an existing brick in a pure distribute volume, as sketched below.
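                         A hedged sketch of that migration on a pure distribute volume (host names and brick paths are placeholders): add the new brick first, then drain and remove the old one.

                         # gluster volume add-brick <volname> <newhost>:/bricks/newbrick
                         # gluster volume remove-brick <volname> <oldhost>:/bricks/oldbrick start
                         # gluster volume remove-brick <volname> <oldhost>:/bricks/oldbrick status
                         # gluster volume remove-brick <volname> <oldhost>:/bricks/oldbrick commit

                         The remove-brick start operation rebalances data off the old brick; commit should only be issued once status reports completion.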
                        "},{"location":"release-notes/3.12.2/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

                           • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
                           • A fix for the last known cause of corruption (#1465123) is still pending and is not yet part of this release.
                        2. Gluster volume restarts fail if the sub directory export feature is in use. Status of this issue can be tracked here, #1501315

                         3. Mounting a gluster snapshot via FUSE will fail. For now, it is recommended that users access snapshots only via the ".snaps" directory on a mounted gluster volume. Status of this issue can be tracked here, #1501378

                        "},{"location":"release-notes/3.12.2/#bugs-addressed","title":"Bugs addressed","text":"
                          A total of 31 patches have been merged, addressing 28 bugs
                        • #1490493: Sub-directory mount details are incorrect in /proc/mounts
                        • #1491178: GlusterD returns a bad memory pointer in glusterd_get_args_from_dict()
                        • #1491292: Provide brick list as part of VOLUME_CREATE event.
                        • #1491690: rpc: TLSv1_2_method() is deprecated in OpenSSL-1.1
                        • #1492026: set the shard-block-size to 64MB in virt profile
                        • #1492061: CLIENT_CONNECT event not being raised
                        • #1492066: AFR_SUBVOL_UP and AFR_SUBVOLS_DOWN events not working
                        • #1493975: disallow replace brick operation on plain distribute volume
                        • #1494523: Spelling errors in 3.12.1
                        • #1495162: glusterd ends up with multiple uuids for the same node
                        • #1495397: Make event-history feature configurable and have it disabled by default
                        • #1495858: gluster volume create asks for confirmation for replica-2 volume even with force
                        • #1496238: [geo-rep]: Scheduler help needs correction for description of --no-color
                        • #1496317: [afr] split-brain observed on T files post hardlink and rename in x3 volume
                        • #1496326: [GNFS+EC] lock is being granted to 2 different client for the same data range at a time after performing lock acquire/release from the clients1
                        • #1497084: glusterfs process consume huge memory on both server and client node
                        • #1499123: Readdirp is considerably slower than readdir on acl clients
                        • #1499150: Improve performance with xattrop update.
                        • #1499158: client-io-threads option not working for replicated volumes
                        • #1499202: self-heal daemon stuck
                        • #1499392: [geo-rep]: Improve the output message to reflect the real failure with schedule_georep script
                         • #1500396: [geo-rep]: Observed "Operation not supported" error with traceback on slave log
                        • #1500472: Use a bitmap to store local node info instead of conf->local_nodeuuids[i].uuids
                         • #1500662: gluster volume heal info "healed" and "heal-failed" showing wrong information
                        • #1500835: [geo-rep]: Status shows ACTIVE for most workers in EC before it becomes the PASSIVE
                        • #1500841: [geo-rep]: Worker crashes with OSError: [Errno 61] No data available
                        • #1500845: [geo-rep] master worker crash with interrupted system call
                         • #1500853: [geo-rep]: Incorrect last sync "0" during hystory crawl after upgrade/stop-start
                        • #1501022: Make choose-local configurable through volume-set command
                         • #1501154: Brick Multiplexing: Gluster volume start force complains with command "Error : Request timed out" when there are multiple volumes
                        "},{"location":"release-notes/3.12.3/","title":"Release notes for Gluster 3.12.3","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.3/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                         1. Two regressions related to the sub-directory mount feature were fixed: gluster volume restart failure (#1465123) and mounting a gluster snapshot via FUSE (#1501378)

                         2. Improvements to the "help" command within the gluster CLI (#1509786)

                         3. Introduction of a new API, glfs_fd_set_lkowner(), to set the lock owner

                        "},{"location":"release-notes/3.12.3/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

                           • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
                           • A fix for the last known cause of corruption (#1465123) is still pending and is not yet part of this release.
                        "},{"location":"release-notes/3.12.3/#bugs-addressed","title":"Bugs addressed","text":"
                          A total of 25 patches have been merged, addressing 25 bugs
                        • #1484489: File-level WORM allows mv over read-only files
                        • #1494527: glusterfs fails to build twice in a row
                        • #1499889: md-cache uses incorrect xattr keynames for GF_POSIX_ACL keys
                        • #1499892: md-cache: xattr values should not be checked with string functions
                        • #1501238: [SNAPSHOT] Unable to mount a snapshot on client
                        • #1501315: Gluster Volume restart fail after exporting fuse sub-dir
                        • #1501864: Add generated HMAC token in header for webhook calls
                        • #1501956: gfapi: API needed to set lk_owner
                        • #1502104: [geo-rep]: RSYNC throwing internal errors
                        • #1503239: [Glusterd] Volume operations fail on a (tiered) volume because of a stale lock held by one of the nodes
                        • #1505221: glusterfs client crash when removing directories
                        • #1505323: When sub-dir is mounted on Fuse client,adding bricks to the same volume unmounts the subdir from fuse client
                        • #1505370: Mishandling null check at send_brick_req of glusterfsd/src/gf_attach.c
                         • #1505373: server.allow-insecure should be visible in "gluster volume set help"
                        • #1505527: Posix compliance rename test fails on fuse subdir mount
                        • #1505846: [GSS] gluster volume status command is missing in man page
                        • #1505856: Potential use of NULL this variable before it gets initialized
                        • #1507747: clean up port map on brick disconnect
                        • #1507748: Brick port mismatch
                        • #1507877: reset-brick commit force failed with glusterd_volume_brickinfo_get Returning -1
                        • #1508283: stale brick processes getting created and volume status shows brick as down(pkill glusterfsd glusterfs ,glusterd restart)
                        • #1509200: Event webhook should work with HTTPS urls
                         • #1509786: The output of the "gluster help" command is difficult to read
                        • #1511271: Rebalance estimate(ETA) shows wrong details(as intial message of 10min wait reappears) when still in progress
                        • #1511301: In distribute volume after glusterd restart, brick goes offline
                        "},{"location":"release-notes/3.12.4/","title":"Release notes for Gluster 3.12.4","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.4/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

                           • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
                           • A fix for the last known cause of corruption (#1465123) is still pending and is not yet part of this release.
                        "},{"location":"release-notes/3.12.4/#bugs-addressed","title":"Bugs addressed","text":"
                          A total of 13 patches have been merged, addressing 12 bugs
                        • #1478411: Directory listings on fuse mount are very slow due to small number of getdents() entries
                        • #1511782: In Replica volume 2*2 when quorum is set, after glusterd restart nfs server is coming up instead of self-heal daemon
                        • #1512432: Test bug-1483058-replace-brick-quorum-validation.t fails inconsistently
                        • #1513258: NetBSD port
                        • #1514380: default timeout of 5min not honored for analyzing split-brain files post setfattr replica.split-brain-heal-finalize
                        • #1514420: gluster volume splitbrain info needs to display output of each brick in a stream fashion instead of buffering and dumping at the end
                        • #1515042: bug-1247563.t is failing on master
                        • #1516691: Rebalance fails on NetBSD because fallocate is not implemented
                        • #1517689: Memory leak in locks xlator
                        • #1518061: Remove 'summary' option from 'gluster vol heal..' CLI
                        • #1523048: glusterd consuming high memory
                        • #1523455: Store allocated objects in the mem_acct
                        "},{"location":"release-notes/3.12.5/","title":"Release notes for Gluster 3.12.5","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.5/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

                           • Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. add/remove bricks and rebalance) there are reports of VM images getting corrupted.
                           • A fix for the last known cause of corruption (#1465123) is still pending and is not yet part of this release.
                        "},{"location":"release-notes/3.12.5/#bugs-addressed","title":"Bugs addressed","text":"
                          A total of 12 patches have been merged, addressing 11 bugs
                        • #1489043: The number of bytes of the quota specified in version 3.7 or later is incorrect
                        • #1511301: In distribute volume after glusterd restart, brick goes offline
                        • #1525850: rdma transport may access an obsolete item in gf_rdma_device_t->all_mr, and causes glusterfsd/glusterfs process crash.
                        • #1527276: feature/bitrot: remove internal xattrs from lookup cbk
                        • #1529085: fstat returns ENOENT/ESTALE
                        • #1529088: opening a file that is destination of rename results in ENOENT errors
                        • #1529095: /usr/sbin/glusterfs crashing on Red Hat OpenShift Container Platform node
                        • #1529539: JWT support without external dependency
                        • #1530448: glustershd fails to start on a volume force start after a brick is down
                        • #1530455: Files are not rebalanced if destination brick(available size) is of smaller size than source brick(available size)
                        • #1531372: Use after free in cli_cmd_volume_create_cbk
                        "},{"location":"release-notes/3.12.6/","title":"Release notes for Gluster 3.12.6","text":"

This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.6/#major-issues","title":"Major issues","text":"
                        1. Expanding a gluster volume that is sharded may cause file corruption

• Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. bricks are added/removed and the volume is rebalanced), there are reports of VM images getting corrupted.
• The fix for the last known cause of corruption (#1465123) is still pending and is not part of this release.
                        "},{"location":"release-notes/3.12.6/#bugs-addressed","title":"Bugs addressed","text":"
A total of 16 patches have been merged, addressing 16 bugs
                        • #1510342: Not all files synced using geo-replication
                        • #1533269: Random GlusterFSD process dies during rebalance
                        • #1534847: entries not getting cleared post healing of softlinks (stale entries showing up in heal info)
                        • #1536334: [Disperse] Implement open fd heal for disperse volume
                        • #1537346: glustershd/glusterd is not using right port when connecting to glusterfsd process
                        • #1539516: DHT log messages: Found anomalies in (null) (gfid = 00000000-0000-0000-0000-000000000000). Holes=1 overlaps=0
                        • #1540224: dht_(f)xattrop does not implement migration checks
                        • #1541267: dht_layout_t leak in dht_populate_inode_for_dentry
                        • #1541930: A down brick is incorrectly considered to be online and makes the volume to be started without any brick available
                        • #1542054: tests/bugs/cli/bug-1169302.t fails spuriously
                        • #1542475: Random failures in tests/bugs/nfs/bug-974972.t
                        • #1542601: The used space in the volume increases when the volume is expanded
                        • #1542615: tests/bugs/core/multiplex-limit-issue-151.t fails sometimes in upstream master
                        • #1542826: Mark tests/bugs/posix/bug-990028.t bad on release-3.12
                        • #1542934: Seeing timer errors in the rebalance logs
                        • #1543016: dht_lookup_unlink_of_false_linkto_cbk fails with \"Permission denied\"
                        • #1544637: 3.8 -> 3.10 rolling upgrade fails (same for 3.12 or 3.13) on Ubuntu 14
                        "},{"location":"release-notes/3.12.7/","title":"Release notes for Gluster 3.12.7","text":"

                        This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                        "},{"location":"release-notes/3.12.7/#bugs-addressed","title":"Bugs addressed","text":""},{"location":"release-notes/3.12.7/#major-issues","title":"Major issues","text":"
1. Consider a case in which one of the nodes in a gluster cluster with brick multiplexing enabled goes down. If volume operations are performed while it is down, then once the node comes back, its brick processes will fail to come up. The issue is tracked in #1543708 and will be fixed in the next release.

                        A total of 8 patches have been merged, addressing 8 bugs

                        • #1517260: Volume wrong size
                        • #1543709: Optimize glusterd_import_friend_volume code path
                        • #1544635: Though files are in split-brain able to perform writes to the file
                        • #1547841: Typo error in __dht_check_free_space function log message
                        • #1548078: [Rebalance] \"Migrate file failed: : failed to get xattr [No data available]\" warnings in rebalance logs
                        • #1548270: DHT calls dht_lookup_everywhere for 1xn volumes
                        • #1549505: Backport patch to reduce duplicate code in server-rpc-fops.c
"},{"location":"release-notes/3.12.8/","title":"Release notes for Gluster 3.12.8","text":"

                          This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                          "},{"location":"release-notes/3.12.8/#bugs-addressed","title":"Bugs addressed","text":"
A total of 9 patches have been merged, addressing 9 bugs
                          • #1543708: glusterd fails to attach brick during restart of the node
                          • #1546627: Syntactical errors in hook scripts for managing SELinux context on bricks
                          • #1549473: possible memleak in glusterfsd process with brick multiplexing on
                          • #1555161: [Rebalance] ENOSPC errors on few files in rebalance logs
                          • #1555201: After a replace brick command, self-heal takes some time to start healing files on disperse volumes
                          • #1558352: [EC] Read performance of EC volume exported over gNFS is significantly lower than write performance
                          • #1561731: Rebalance failures on a dispersed volume with lookup-optimize enabled
                          • #1562723: SHD is not healing entries in halo replication
                          • #1565590: timer: Possible race condition between gftimer* routines
                          "},{"location":"release-notes/3.12.9/","title":"Release notes for Gluster 3.12.9","text":"

                          This is a bugfix release. The release notes for 3.12.0, 3.12.1, 3.12.2, 3.12.3, 3.12.4, 3.12.5, 3.12.6, 3.12.7, and 3.12.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.12 stable release.

                          "},{"location":"release-notes/3.12.9/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

This release contains a fix for a security vulnerability in Gluster, as follows:

                          • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1088
                          • https://nvd.nist.gov/vuln/detail/CVE-2018-1088

Installing the updated packages and restarting gluster services will update the Gluster shared storage volume volfiles, which are more secure than the defaults currently in place.

Further, for increased security, the Gluster shared storage volume can be TLS-enabled and access to it restricted using the auth.ssl-allow option. See this guide for more details.
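As an illustration only (not part of the original advisory), enabling TLS on the shared storage volume and restricting access to a set of certificate common names could look roughly like the sketch below. It assumes certificates are already provisioned on all nodes and uses the standard client.ssl, server.ssl and auth.ssl-allow volume options; the CN list is a placeholder.

# gluster volume set gluster_shared_storage client.ssl on
# gluster volume set gluster_shared_storage server.ssl on
# gluster volume set gluster_shared_storage auth.ssl-allow '<cn-of-node1>,<cn-of-node2>,<cn-of-node3>'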

                          "},{"location":"release-notes/3.12.9/#major-issues","title":"Major issues","text":"

                          None

                          "},{"location":"release-notes/3.12.9/#bugs-addressed","title":"Bugs addressed","text":"

                          Bugs addressed since release-3.12.8 are listed below.

                          • #1566131: Bringing down data bricks in cyclic order results in arbiter brick becoming the source for heal.
                          • #1566820: [Remove-brick] Many files were not migrated from the decommissioned bricks; commit results in data loss
                          • #1569407: EIO errors on some operations when volume has mixed brick versions on a disperse volume
                          • #1570430: CVE-2018-1088 glusterfs: Privilege escalation via gluster_shared_storage when snapshot scheduling is enabled [fedora-all]
                          "},{"location":"release-notes/3.13.0/","title":"Release notes for Gluster 3.13.0","text":"

This is a major release that includes a range of features enhancing usability, enhancements to GFAPI for developers, and a set of bug fixes.

                          The most notable features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

                          "},{"location":"release-notes/3.13.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/3.13.0/#addition-of-summary-option-to-the-heal-info-cli","title":"Addition of summary option to the heal info CLI","text":"

Notes for users: The Gluster heal info CLI now has a 'summary' option that displays, per brick, statistics of entries pending heal, in split-brain, and currently being healed.

                          Usage:

                          # gluster volume heal <volname> info summary\n

                          Sample output:

Brick <brickname>
Status: Connected
Total Number of entries: 3
Number of entries in heal pending: 2
Number of entries in split-brain: 1
Number of entries possibly healing: 0

Brick <brickname>
Status: Connected
Total Number of entries: 4
Number of entries in heal pending: 3
Number of entries in split-brain: 1
Number of entries possibly healing: 0

Using the --xml option with the CLI produces the same output in XML format.

NOTE: Summary information is gathered in the same way as the detailed information, so the command takes just as long to complete; it is not faster.
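For example, the XML form mentioned above can be requested with the generic --xml CLI flag (illustrative usage):

# gluster volume heal <volname> info summary --xml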

                          "},{"location":"release-notes/3.13.0/#addition-of-checks-for-allowing-lookups-in-afr-and-removal-of-clusterquorum-reads-volume-option","title":"Addition of checks for allowing lookups in AFR and removal of 'cluster.quorum-reads' volume option.","text":"

                          Notes for users:

Previously, AFR never failed a lookup unless there was a GFID mismatch. This behavior changes with this release, as a part of fixing Bug#1515572.

Lookups in replica-3 and arbiter volumes will now succeed only if there is quorum and a good copy of the file is available. That is, the lookup has to succeed on a quorum of bricks, and at least one of them has to hold a good copy. If these conditions are not met, the operation fails with ENOTCONN.

As a part of this change, the cluster.quorum-reads volume option is removed; since a lookup failure causes all subsequent operations (including reads) to fail, the option is redundant.

Ensuring this strictness also helps prevent a long-standing rename-leading-to-data-loss bug (Bug#1366818), by disallowing lookups (and thus renames) when a good copy is not available.

Note: These checks do not affect replica 2 volumes, where lookups work as before, even when only 1 brick is online.

                          Further reference: mailing list discussions on topic

                          "},{"location":"release-notes/3.13.0/#support-for-max-port-range-in-glusterdvol","title":"Support for max-port range in glusterd.vol","text":"

                          Notes for users:

The glusterd configuration provides an option to control the number of ports that can be used by gluster daemons on a node.

                          The option is named \"max-port\" and can be set in the glusterd.vol file per-node to the desired maximum.
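A minimal sketch of how this could look in /etc/glusterfs/glusterd.vol, assuming 60999 as the desired upper bound; the surrounding options are illustrative and may differ per installation:

volume management
    type mgmt/glusterd
    option working-directory /var/lib/glusterd
    option max-port 60999
end-volume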

                          "},{"location":"release-notes/3.13.0/#prevention-of-other-processes-accessing-the-mounted-brick-snapshots","title":"Prevention of other processes accessing the mounted brick snapshots","text":"

Notes for users: Snapshots of gluster bricks are now mounted only when the snapshot is active, or when it is being restored. Prior to this, snapshots of gluster volumes were mounted by default across the entire life-cycle of the snapshot.

                          This behavior is transparent to users and managed by the gluster processes.

                          "},{"location":"release-notes/3.13.0/#enabling-thin-client","title":"Enabling thin client","text":"

Notes for users: The Gluster client stack encompasses the cluster translators (such as distribute, replicate, or disperse), in addition to the usual caching translators on the client stack. In certain cases this makes the client footprint larger than is sustainable and also requires frequent client updates.

The thin client feature moves the clustering translators (like distribute and other translators below it) and a few caching translators to a managed protocol endpoint (called gfproxy) on the gluster server nodes, thus thinning the client stack.

                          Usage:

                          # gluster volume set <volname> config.gfproxyd enable\n

                          The above enables the gfproxy protocol service on the server nodes. To mount a client that interacts with this end point, use the --thin-client mount option.

                          Example:

                          # glusterfs --thin-client --volfile-id=<volname> --volfile-server=<host> <mountpoint>\n

                          Limitations: This feature is a technical preview in the 3.13.0 release, and will be improved in the upcoming releases.

                          "},{"location":"release-notes/3.13.0/#ability-to-reserve-back-end-storage-space","title":"Ability to reserve back-end storage space","text":"

Notes for users: The POSIX translator is enhanced with an option that reserves disk space on the bricks. This reserved space is not used by client mounts, which prevents disk-full scenarios; expanding a disk or a cluster is far more tedious when the back-end bricks are already full.

When a brick has free space equal to or less than the reserved space, mount points using that brick will get ENOSPC errors.

The option takes a numeric percentage value and reserves up to that percentage of disk space. The default value is 1(%) of the brick size; setting it to 0(%) disables the feature.

                          Usage:

                          # gluster volume set <volname> storage.reserve <number>\n
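For example, to reserve 5% of each brick (an illustrative value, not a recommendation):

# gluster volume set <volname> storage.reserve 5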
                          "},{"location":"release-notes/3.13.0/#list-all-the-connected-clients-for-a-brick-and-also-exported-brickssnapshots-from-each-brick-process","title":"List all the connected clients for a brick and also exported bricks/snapshots from each brick process","text":"

Notes for users: The Gluster CLI is enhanced with an option to list all clients connected to a volume (or to all volumes), as well as the bricks and snapshots exported by each brick process.

                          Usage:

                          # gluster volume status <volname/all> client-list\n
                          "},{"location":"release-notes/3.13.0/#improved-write-performance-with-disperse-xlator","title":"Improved write performance with Disperse xlator","text":"

Notes for users: The disperse translator has been enhanced to support parallel writes, which improves the performance of write operations on disperse volumes.

This feature is enabled by default and can be toggled using the boolean option 'disperse.parallel-writes'.
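For instance, to turn the behaviour off on a volume (illustrative usage of the option named above):

# gluster volume set <volname> disperse.parallel-writes off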

                          "},{"location":"release-notes/3.13.0/#disperse-xlator-now-supports-discard-operations","title":"Disperse xlator now supports discard operations","text":"

Notes for users: This feature enables users to punch holes in files created on disperse volumes.

                          Usage:

                          # fallocate  -p -o <offset> -l <len> <file_name>\n
                          "},{"location":"release-notes/3.13.0/#included-details-about-memory-pools-in-statedumps","title":"Included details about memory pools in statedumps","text":"

Notes for users: For troubleshooting purposes it is sometimes useful to verify the memory allocations done by Gluster. A previous release of Gluster included a rewrite of the memory pool internals; since that change, statedumps no longer included details about the memory pools.

This version of Gluster adds details about the memory pools in use to the statedump, making it much more efficient to troubleshoot memory consumption problems again.
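To inspect these details, a statedump can be generated with the existing CLI; the dump files are written on the brick nodes (by default under /var/run/gluster, though the location can vary by distribution):

# gluster volume statedump <volname>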

                          Limitations: There are currently no statistics included in the statedump about the actual behavior of the memory pools. This means that the efficiency of the memory pools can not be verified.

                          "},{"location":"release-notes/3.13.0/#gluster-apis-added-to-register-callback-functions-for-upcalls","title":"Gluster APIs added to register callback functions for upcalls","text":"

Notes for developers: New APIs have been added to allow gfapi applications to register and unregister for upcall events. Along with the list of events they are interested in, applications now have to register a callback function. This routine is invoked asynchronously, in a gluster thread context, whenever the backend server sends an upcall.

int glfs_upcall_register (struct glfs *fs, uint32_t event_list,
                          glfs_upcall_cbk cbk, void *data);
int glfs_upcall_unregister (struct glfs *fs, uint32_t event_list);

The libgfapi header files include the complete synopsis of these APIs' definitions and their usage.

                          Limitations: An application can register only a single callback function for all the upcall events it is interested in.

                          Known Issues: Bug#1515748 GlusterFS server should be able to identify the clients which registered for upcalls and notify only those clients in case of such events

                          "},{"location":"release-notes/3.13.0/#gluster-api-added-with-a-glfs_mem_header-for-exported-memory","title":"Gluster API added with a glfs_mem_header for exported memory","text":"

                          Notes for developers: Memory allocations done in libgfapi that return a structure to the calling application should use GLFS_CALLOC() and friends. Applications can then correctly free the memory by calling glfs_free().

                          This is implemented with a new glfs_mem_header similar to how the memory allocations are done with GF_CALLOC() etc. The new header includes a release() function pointer that gets called to free the resource when the application calls glfs_free().

The change is a major improvement for allocating and freeing resources in a standardized way that is transparent to libgfapi applications.

                          "},{"location":"release-notes/3.13.0/#provided-a-new-xlator-to-delay-fops-to-aid-slow-brick-response-simulation-and-debugging","title":"Provided a new xlator to delay fops, to aid slow brick response simulation and debugging","text":"

Notes for developers: Like the error-gen translator, a new translator that introduces delays for FOPs has been added to the code base. This can help in diagnosing issues around slow(er) client responses and enables better qualification of the translator stacks.

                          For usage refer to this test case.

                          "},{"location":"release-notes/3.13.0/#major-issues","title":"Major issues","text":"
                          1. Expanding a gluster volume that is sharded may cause file corruption

• Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. bricks are added/removed and the volume is rebalanced), there are reports of VM images getting corrupted.
• A fix for the last known cause of corruption (Bug #1515434) is included in this release. As further testing is still in progress, the issue is retained as a major issue.
• The status of this bug can be tracked at #1515434.
                          "},{"location":"release-notes/3.13.0/#bugs-addressed","title":"Bugs addressed","text":"

                          Bugs addressed since release-3.12.0 are listed below.

                          • #1248393: DHT: readdirp fails to read some directories.
                          • #1258561: Gluster puts PID files in wrong place
                          • #1261463: AFR : [RFE] Improvements needed in \"gluster volume heal info\" commands
                          • #1294051: Though files are in split-brain able to perform writes to the file
                          • #1328994: When a feature fails needing a higher opversion, the message should state what version it needs.
                          • #1335251: mgmt/glusterd: clang compile warnings in glusterd-snapshot.c
                          • #1350406: [storage/posix] - posix_do_futimes function not implemented
                          • #1365683: Fix crash bug when mnt3_resolve_subdir_cbk fails
                          • #1371806: DHT :- inconsistent 'custom extended attributes',uid and gid, Access permission (for directories) if User set/modifies it after bringing one or more sub-volume down
                          • #1376326: separating attach tier and add brick
                          • #1388509: gluster volume heal info \"healed\" and \"heal-failed\" showing wrong information
                          • #1395492: trace/error-gen be turned on together while use 'volume set' command to set one of them
                          • #1396327: gluster core dump due to assert failed GF_ASSERT (brick_index < wordcount);
                          • #1406898: Need build time option to default to IPv6
                          • #1428063: gfproxy: Introduce new server-side daemon called GFProxy
                          • #1432046: symlinks trigger faulty geo-replication state (rsnapshot usecase)
                          • #1443145: Free runtime allocated resources upon graph switch or glfs_fini()
                          • #1445663: Improve performance with xattrop update.
                          • #1451434: Use a bitmap to store local node info instead of conf->local_nodeuuids[i].uuids
                          • #1454590: run.c demo mode broken
                          • #1457985: Rebalance estimate time sometimes shows negative values
                          • #1460514: [Ganesha] : Ganesha crashes while cluster enters failover/failback mode
                          • #1461018: Implement DISCARD FOP for EC
                          • #1462969: Peer-file parsing is too fragile
                          • #1467209: [Scale] : Rebalance ETA shows the initial estimate to be ~140 days,finishes within 18 hours though.
                          • #1467614: Gluster read/write performance improvements on NVMe backend
                          • #1468291: NFS Sub directory is getting mounted on solaris 10 even when the permission is restricted in nfs.export-dir volume option
                          • #1471366: Posix xlator needs to reserve disk space to prevent the brick from getting full.
                          • #1472267: glusterd fails to start
                          • #1472609: Root path xattr does not heal correctly in certain cases when volume is in stopped state
                          • #1472758: Running sysbench on vm disk from plain distribute gluster volume causes disk corruption
                          • #1472961: [GNFS+EC] lock is being granted to 2 different client for the same data range at a time after performing lock acquire/release from the clients1
                          • #1473026: replace-brick failure leaves glusterd in inconsistent state
                          • #1473636: Launch metadata heal in discover code path.
                          • #1474180: [Scale] : Client logs flooded with \"inode context is NULL\" error messages
                          • #1474190: cassandra fails on gluster-block with both replicate and ec volumes
                          • #1474309: Disperse: Coverity issue
                          • #1474318: dht remove-brick status does not indicate failures files not migrated because of a lack of space
                          • #1474639: [Scale] : Rebalance Logs are bulky.
                          • #1475255: [Geo-rep]: Geo-rep hangs in changelog mode
                          • #1475282: [Remove-brick] Few files are getting migrated eventhough the bricks crossed cluster.min-free-disk value
                          • #1475300: implementation of fallocate call in read-only xlator
                          • #1475308: [geo-rep]: few of the self healed hardlinks on master did not sync to slave
                          • #1475605: gluster-block default shard-size should be 64MB
                          • #1475632: Brick Multiplexing: Brick process crashed at changetimerecorder(ctr) translator when restarting volumes
                          • #1476205: [EC]: md5sum mismatches every time for a file from the fuse client on EC volume
                          • #1476295: md-cache uses incorrect xattr keynames for GF_POSIX_ACL keys
                          • #1476324: md-cache: xattr values should not be checked with string functions
                          • #1476410: glusterd: code lacks clarity of logic in glusterd_get_quorum_cluster_counts()
                          • #1476665: [Perf] : Large file sequential reads are off target by ~38% on FUSE/Ganesha
                          • #1476668: [Disperse] : Improve heal info command to handle obvious cases
                          • #1476719: glusterd: flow in glusterd_validate_quorum() could be streamlined
                          • #1476785: scripts: invalid test in S32gluster_enable_shared_storage.sh
                          • #1476861: packaging: /var/lib/glusterd/options should be %config(noreplace)
                          • #1476957: peer-parsing.t fails on NetBSD
                          • #1477169: AFR entry self heal removes a directory's .glusterfs symlink.
                          • #1477404: eager-lock should be off for cassandra to work at the moment
                          • #1477488: Permission denied errors when appending files after readdir
                          • #1478297: Add NULL gfid checks before creating file
                          • #1478710: when gluster pod is restarted, bricks from the restarted pod fails to connect to fuse, self-heal etc
                          • #1479030: nfs process crashed in \"nfs3_getattr\"
                          • #1480099: More useful error - replace 'not optimal'
                          • #1480445: Log entry of files skipped/failed during rebalance operation
                          • #1480525: Make choose-local configurable through volume-set command
                          • #1480591: [Scale] : I/O errors on multiple gNFS mounts with \"Stale file handle\" during rebalance of an erasure coded volume.
                          • #1481199: mempool: run-time crash when built with --disable-mempool
                          • #1481600: rpc: client_t and related objects leaked due to incorrect ref counts
                          • #1482023: snpashots issues with other processes accessing the mounted brick snapshots
                          • #1482344: Negative Test: glusterd crashes for some of the volume options if set at cluster level
                          • #1482906: /var/lib/glusterd/peers File had a blank line, Stopped Glusterd from starting
                          • #1482923: afr: check op_ret value in __afr_selfheal_name_impunge
                          • #1483058: [quorum]: Replace brick is happened when Quorum not met.
                          • #1483995: packaging: use rdma-core(-devel) instead of ibverbs, rdmacm; disable rdma on armv7hl
                          • #1484215: Add Deepshika has CI Peer
                          • #1484225: [rpc]: EPOLLERR - disconnecting now messages every 3 secs after completing rebalance
                          • #1484246: [PATCH] incorrect xattr list handling on FreeBSD
                          • #1484490: File-level WORM allows mv over read-only files
                          • #1484709: [geo-rep+qr]: Crashes observed at slave from qr_lookup_sbk during rename/hardlink/rebalance cases
                          • #1484722: return ENOSYS for 'non readable' FOPs
                          • #1485962: gluster-block profile needs to have strict-o-direct
                          • #1486134: glusterfsd (brick) process crashed
                          • #1487644: Fix reference to readthedocs.io in source code and elsewhere
                          • #1487830: scripts: mount.glusterfs contains non-portable bashisms
                          • #1487840: glusterd: spelling errors reported by Debian maintainer
                          • #1488354: gluster-blockd process crashed and core generated
                          • #1488399: Crash in dht_check_and_open_fd_on_subvol_task()
                          • #1488546: [RHHI] cannot boot vms created from template when disk format = qcow2
                          • #1488808: Warning on FreeBSD regarding -Wformat-extra-args
                          • #1488829: Fix unused variable when TCP_USER_TIMEOUT is undefined
                          • #1488840: Fix guard define on nl-cache
                          • #1488906: Fix clagn/gcc warning for umountd
                          • #1488909: Fix the type of 'len' in posix.c, clang is showing a warning
                          • #1488913: Sub-directory mount details are incorrect in /proc/mounts
                          • #1489432: disallow replace brick operation on plain distribute volume
                          • #1489823: set the shard-block-size to 64MB in virt profile
                          • #1490642: glusterfs client crash when removing directories
                          • #1490897: GlusterD returns a bad memory pointer in glusterd_get_args_from_dict()
                          • #1491025: rpc: TLSv1_2_method() is deprecated in OpenSSL-1.1
                          • #1491670: [afr] split-brain observed on T files post hardlink and rename in x3 volume
                          • #1492109: Provide brick list as part of VOLUME_CREATE event.
                          • #1492542: Gluster v status client-list prints wrong output for multiplexed volume.
                          • #1492849: xlator/tier: flood of -Wformat-truncation warnings with gcc-7.
                          • #1492851: xlator/bitrot: flood of -Wformat-truncation warnings with gcc-7.
                          • #1492968: CLIENT_CONNECT event is not notified by eventsapi
                          • #1492996: Readdirp is considerably slower than readdir on acl clients
                          • #1493133: GlusterFS failed to build while running make
                          • #1493415: self-heal daemon stuck
                          • #1493539: AFR_SUBVOL_UP and AFR_SUBVOLS_DOWN events not working
                          • #1493893: gluster volume asks for confirmation for disperse volume even with force
                          • #1493967: glusterd ends up with multiple uuids for the same node
                          • #1495384: Gluster 3.12.1 Packages require manual systemctl daemon reload after install
                          • #1495436: [geo-rep]: Scheduler help needs correction for description of --no-color
                          • #1496363: Add generated HMAC token in header for webhook calls
                          • #1496379: glusterfs process consume huge memory on both server and client node
                          • #1496675: Verify pool pointer before destroying it
                          • #1498570: client-io-threads option not working for replicated volumes
                          • #1499004: [Glusterd] Volume operations fail on a (tiered) volume because of a stale lock held by one of the nodes
                          • #1499159: [geo-rep]: Improve the output message to reflect the real failure with schedule_georep script
                          • #1499180: [geo-rep]: Observed \"Operation not supported\" error with traceback on slave log
                          • #1499391: [geo-rep]: Worker crashes with OSError: [Errno 61] No data available
                          • #1499393: [geo-rep] master worker crash with interrupted system call
                          • #1499509: Brick Multiplexing: Gluster volume start force complains with command \"Error : Request timed out\" when there are multiple volumes
                          • #1499641: gfapi: API needed to set lk_owner
                          • #1499663: Mark test case ./tests/bugs/bug-1371806_1.t as a bad test case.
                          • #1499933: md-cache: Add additional samba and macOS specific EAs to mdcache
                          • #1500269: opening a file that is destination of rename results in ENOENT errors
                          • #1500284: [geo-rep]: Status shows ACTIVE for most workers in EC before it becomes the PASSIVE
                          • #1500346: [geo-rep]: Incorrect last sync \"0\" during hystory crawl after upgrade/stop-start
                          • #1500433: [geo-rep]: RSYNC throwing internal errors
                          • #1500649: Shellcheck errors in hook scripts
                          • #1501235: [SNAPSHOT] Unable to mount a snapshot on client
                          • #1501317: glusterfs fails to build twice in a row
                          • #1501390: Intermittent failure in tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t on NetBSD
                          • #1502253: snapshot_scheduler crashes when SELinux is absent on the system
                          • #1503246: clean up port map on brick disconnect
                          • #1503394: Mishandling null check at send_brick_req of glusterfsd/src/gf_attach.c
                          • #1503424: server.allow-insecure should be visible in \"gluster volume set help\"
                          • #1503510: [BitRot] man page of gluster needs to be updated for scrub-frequency
                          • #1503519: default timeout of 5min not honored for analyzing split-brain files post setfattr replica.split-brain-heal-finalize
                          • #1503983: Wrong usage of getopt shell command in hook-scripts
                          • #1505253: Update .t test files to use the new tier commands
                          • #1505323: When sub-dir is mounted on Fuse client,adding bricks to the same volume unmounts the subdir from fuse client
                          • #1505325: Potential use of NULL this variable before it gets initialized
                          • #1505527: Posix compliance rename test fails on fuse subdir mount
                          • #1505663: [GSS] gluster volume status command is missing in man page
                          • #1505807: files are appendable on file-based worm volume
                          • #1506083: Ignore disk space reserve check for internal FOPS
                          • #1506513: stale brick processes getting created and volume status shows brick as down(pkill glusterfsd glusterfs ,glusterd restart)
                          • #1506589: Brick port mismatch
                          • #1506903: Event webhook should work with HTTPS urls
                          • #1507466: reset-brick commit force failed with glusterd_volume_brickinfo_get Returning -1
                          • #1508898: Add new configuration option to manage deletion of Worm files
                          • #1509789: The output of the \"gluster help\" command is difficult to read
                          • #1510012: GlusterFS 3.13.0 tracker
                          • #1510019: Change default versions of certain features to 3.13 from 4.0
                          • #1510022: Revert experimental and 4.0 features to prepare for 3.13 release
                          • #1511274: Rebalance estimate(ETA) shows wrong details(as intial message of 10min wait reappears) when still in progress
                          • #1511293: In distribute volume after glusterd restart, brick goes offline
                          • #1511768: In Replica volume 2*2 when quorum is set, after glusterd restart nfs server is coming up instead of self-heal daemon
                          • #1512435: Test bug-1483058-replace-brick-quorum-validation.t fails inconsistently
                          • #1512460: disperse eager-lock degrades performance for file create workloads
                          • #1513259: NetBSD port
                          • #1514419: gluster volume splitbrain info needs to display output of each brick in a stream fashion instead of buffering and dumping at the end
                          • #1515045: bug-1247563.t is failing on master
                          • #1515572: Accessing a file when source brick is down results in that FOP being hung
                          • #1516313: Bringing down data bricks in cyclic order results in arbiter brick becoming the source for heal.
                          • #1517692: Memory leak in locks xlator
                          • #1518257: EC DISCARD doesn't punch hole properly
                          • #1518512: Change GD_OP_VERSION to 3_13_0 from 3_12_0 for RFE https://bugzilla.redhat.com/show_bug.cgi?id=1464350
                          • #1518744: Add release notes about DISCARD on EC volume
                          "},{"location":"release-notes/3.13.1/","title":"Release notes for Gluster 3.13.1","text":"

This is a bugfix release. The release notes for 3.13.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.13 stable release.

                          "},{"location":"release-notes/3.13.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                          No Major changes

                          "},{"location":"release-notes/3.13.1/#major-issues","title":"Major issues","text":"
                          1. Expanding a gluster volume that is sharded may cause file corruption

• Sharded volumes are typically used for VM images; if such volumes are expanded or possibly contracted (i.e. bricks are added/removed and the volume is rebalanced), there are reports of VM images getting corrupted.
• The last known cause of corruption (Bug #1515434) is still under review.
• The status of this bug can be tracked at #1515434.
                          "},{"location":"release-notes/3.13.1/#bugs-addressed","title":"Bugs addressed","text":"

                          Bugs addressed since release-3.13.0 are listed below.

                          • #1428060: write-behind: Allow trickling-writes to be configurable, fix usage of page_size and window_size
                          • #1520232: Rebalance fails on NetBSD because fallocate is not implemented
                          • #1522710: Directory listings on fuse mount are very slow due to small number of getdents() entries
                          • #1523046: glusterd consuming high memory
                          • #1523456: Store allocated objects in the mem_acct
                          • #1527275: feature/bitrot: remove internal xattrs from lookup cbk
                          • #1527699: rdma transport may access an obsolete item in gf_rdma_device_t->all_mr, and causes glusterfsd/glusterfs process crash.
                          "},{"location":"release-notes/3.13.2/","title":"Release notes for Gluster 3.13.2","text":"

This is a bugfix release. The release notes for 3.13.0 and 3.13.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.13 stable release.

                          "},{"location":"release-notes/3.13.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                          No Major changes

                          "},{"location":"release-notes/3.13.2/#major-issues","title":"Major issues","text":"

No major issues

                          "},{"location":"release-notes/3.13.2/#bugs-addressed","title":"Bugs addressed","text":"

                          Bugs addressed since release-3.13.1 are listed below.

                          • #1511293: In distribute volume after glusterd restart, brick goes offline
                          • #1515434: dht_(f)xattrop does not implement migration checks
                          • #1516313: Bringing down data bricks in cyclic order results in arbiter brick becoming the source for heal.
                          • #1529055: Test case ./tests/bugs/bug-1371806_1.t is failing
                          • #1529084: fstat returns ENOENT/ESTALE
                          • #1529094: /usr/sbin/glusterfs crashing on Red Hat OpenShift Container Platform node
                          • #1530449: glustershd fails to start on a volume force start after a brick is down
                          • #1531371: Use after free in cli_cmd_volume_create_cbk
                          • #1533023: [Disperse] Implement open fd heal for disperse volume
                          • #1534842: entries not getting cleared post healing of softlinks (stale entries showing up in heal info)
                          • #1535438: Take full lock on files in 3 way replication
                          • #1536294: Random GlusterFSD process dies during rebalance
                          "},{"location":"release-notes/3.5.0/","title":"3.5.0","text":""},{"location":"release-notes/3.5.0/#major-changes-and-features","title":"Major Changes and Features","text":"

                          Documentation about major changes and features is also included in the doc/features/ directory of GlusterFS repository.

                          "},{"location":"release-notes/3.5.0/#afr_cli_enhancements","title":"AFR_CLI_enhancements","text":"

The AFR reporting via the CLI has been improved. This feature provides a coherent mechanism to present heal status, information, and the associated logs. This makes the end user more aware of the healing status and provides statistics.
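As a rough illustration of the improved reporting (exact sub-commands and output vary by version), the heal information and statistics are queried through the volume heal CLI:

# gluster volume heal <volname> info
# gluster volume heal <volname> info healed
# gluster volume heal <volname> info heal-failed
# gluster volume heal <volname> statistics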

                          For more information refer here.

                          "},{"location":"release-notes/3.5.0/#file_snapshot","title":"File_Snapshot","text":"

This feature provides the ability to take snapshots of files in GlusterFS. File snapshots are supported for files in the QCOW2/QED formats.

This feature adds better integration with OpenStack Cinder and, in general, the ability to take snapshots of files (typically VM images).

                          For more information refer here.

                          "},{"location":"release-notes/3.5.0/#gfid-access","title":"gfid-access","text":"

This feature adds a new translator which is designed to provide direct access to files in glusterfs using their GFIDs.
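A minimal sketch of how this is typically consumed, assuming the FUSE aux-gfid-mount mount option and a known GFID (host, volume name and GFID are placeholders):

# mount -t glusterfs -o aux-gfid-mount <host>:/<volname> /mnt/aux
# cat /mnt/aux/.gfid/<gfid-of-file>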

                          For more information refer here.

                          "},{"location":"release-notes/3.5.0/#prevent-nfs-restart-on-volume-change","title":"Prevent NFS restart on Volume change","text":"

Earlier, any volume change (volume option, volume start, volume stop, volume delete, brick add, etc.) required restarting the NFS server.

With this feature, it is no longer required to restart the NFS server, thereby providing better usability with no disruptions to NFS connections.

                          "},{"location":"release-notes/3.5.0/#featuresquota_scalability","title":"Features/Quota_Scalability","text":"

This feature provides support for up to 65536 quota configurations per volume.
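Quota itself is configured with the existing CLI; for example (illustrative directory and limit):

# gluster volume quota <volname> enable
# gluster volume quota <volname> limit-usage /projects 10GB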

                          "},{"location":"release-notes/3.5.0/#readdir_ahead","title":"readdir_ahead","text":"

                          This feature provides read-ahead support for directories to improve sequential directory read performance.
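The translator can be toggled per volume with the performance.readdir-ahead option (shown here as an illustration; the default may differ between releases):

# gluster volume set <volname> performance.readdir-ahead on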

                          "},{"location":"release-notes/3.5.0/#zerofill","title":"zerofill","text":"

The zerofill feature allows creation of pre-allocated and zeroed-out files on GlusterFS volumes by offloading the zeroing to the server and/or storage (storage offloads use SCSI WRITESAME), thereby achieving quick creation of pre-allocated and zeroed-out VM disk images using server/storage off-loads.

                          For more information refer here.

                          "},{"location":"release-notes/3.5.0/#brick_failure_detection","title":"Brick_Failure_Detection","text":"

This feature attempts to identify storage/file system failures and disable the failed brick without disrupting the rest of the node's operation.

This adds a health-checker that periodically checks the status of the filesystem (which implies checking that the underlying storage hardware is functional).
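The check interval is controlled by the storage.health-check-interval volume option (value in seconds; 30 is shown purely as an example):

# gluster volume set <volname> storage.health-check-interval 30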

                          For more information refer here.

                          "},{"location":"release-notes/3.5.0/#changelog-based-distributed-geo-replication","title":"Changelog based distributed geo-replication","text":"

New and improved geo-replication that makes use of all the nodes in the master volume. Unlike the previous version of geo-replication, where all changes were detected and synced from a single node in the master volume, now each node of the master volume participates in geo-replication.

Change Detection - Geo-rep now makes use of the changelog xlator to detect the set of files which need to be synced. The changelog xlator runs per brick and, when enabled, records each FOP that modifies files. Geo-rep consumes the journals created by this xlator and syncs the files identified as 'changed' to the slave.

Distributed nature - Each of the nodes takes responsibility for syncing the data which is present on that node. In the case of a replicated volume, one of them will be 'Active'ly syncing the data, while the other is 'Passive'.

Syncing Method - Apart from using rsync as the syncing method, there is now a tar+ssh syncing method, which can be leveraged by workloads with a large number of small files; see the sketch below.
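A hedged sketch of switching a session to the tar+ssh method, assuming the geo-replication config option was named use_tarssh in this release (master/slave names are placeholders):

# gluster volume geo-replication <master-vol> <slave-host>::<slave-vol> config use_tarssh true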

                          "},{"location":"release-notes/3.5.0/#improved-block-device-translator","title":"Improved block device translator","text":"

                          This feature provides a translator to use logical volumes to store VM images and expose them as files to QEMU/KVM.

The volume group is represented as a directory and the logical volumes as files.

                          "},{"location":"release-notes/3.5.0/#remove-brick-cli-change","title":"Remove brick CLI Change","text":"

The remove-brick CLI earlier used to remove a brick forcefully (without data migration) when called without any arguments. This mode of the 'remove-brick' CLI, without any arguments, has been deprecated; see the example below.
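With the argument-less form deprecated, the explicit modes are used instead; a typical data-migrating removal looks roughly like the following (brick and volume names are placeholders):

# gluster volume remove-brick <volname> <brick> start
# gluster volume remove-brick <volname> <brick> status
# gluster volume remove-brick <volname> <brick> commit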

                          "},{"location":"release-notes/3.5.0/#experimental-features","title":"Experimental Features","text":"

                          The following features are experimental with this release:

                          • RDMA-connection manager (RDMA-CM).
                          • support for NUFA translator.
                          • disk-encryption
                          • On-Wire Compression + Decompression [CDC]
                          "},{"location":"release-notes/3.5.0/#minor-improvements","title":"Minor Improvements:","text":"
                          • Old graphs are cleaned up by FUSE clients

                          • New command \"volume status tasks\" introduced to track asynchronous tasks like rebalance and remove-brick

                          • glfs_readdir(), glfs_readdirplus(), glfs_fallocate(), glfs_discard() APIs support added in libgfapi

                          • Per client RPC throttling added in rpc server

                          • Communication between cli and glusterd happens over unix domain socket

                          • Information on connected NFS clients is persistent across NFS restarts.

                          • Hardlink creation failures with SMB addressed

                          • Non-local clients function with nufa volumes

                          • Configurable option added to mount.glusterfs to use kernel-readdirp with fuse client

                          • AUTH support for exported nfs sub-directories added

                          "},{"location":"release-notes/3.5.0/#known-issues","title":"Known Issues:","text":"
                          • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly:
1) gluster volume set <volname> server.allow-insecure on

2) Edit /etc/glusterfs/glusterd.vol to contain this line:
      option rpc-auth-allow-insecure on

Post 1), restarting the volume would be necessary.
Post 2), restarting glusterd would be necessary.
                          • RDMA connection manager needs IPoIB for connection establishment. More details can be found here.

• For Block Device translator based volumes, the open-behind translator on the client side needs to be disabled.

• libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang, as reported here. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init.

                          "},{"location":"release-notes/3.5.1/","title":"3.5.1","text":""},{"location":"release-notes/3.5.1/#release-notes-for-glusterfs-351","title":"Release Notes for GlusterFS 3.5.1","text":"

                          This is mostly a bugfix release. The Release Notes for 3.5.0 contain a listing of all the new features that were added.

There are two notable changes that are not just bug fixes or documentation additions:

1. A new volume option server.manage-gids has been added. This option should be used when users of a volume are in more than approximately 93 groups (Bug 1096425); see the example after this list.
2. Duplicate Request Cache for NFS has now been disabled by default; this may reduce performance for certain workloads, but it improves the overall stability and memory footprint for most users.
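For item 1 above, enabling the option is a single volume-set call (illustrative; the Known Issues below note that the volume must be stopped and started afterwards):

# gluster volume set <volname> server.manage-gids on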
                          "},{"location":"release-notes/3.5.1/#bugs-fixed","title":"Bugs Fixed:","text":"
                          • 765202: lgetxattr called with invalid keys on the bricks
                          • 833586: inodelk hang from marker_rename_release_newp_lock
                          • 859581: self-heal process can sometimes create directories instead of symlinks for the root gfid file in .glusterfs
                          • 986429: Backupvolfile server option should work internal to GlusterFS framework
                          • 1039544: [FEAT] \"gluster volume heal info\" should list the entries that actually required to be healed.
                          • 1046624: Unable to heal symbolic Links
                          • 1046853: AFR : For every file self-heal there are warning messages reported in glustershd.log file
                          • 1063190: Volume was not accessible after server side quorum was met
                          • 1064096: The old Python Translator code (not Glupy) should be removed
                          • 1066996: Using sanlock on a gluster mount with replica 3 (quorum-type auto) leads to a split-brain
                          • 1071191: [3.5.1] Sporadic SIGBUS with mmap() on a sparse file created with open(), seek(), write()
                          • 1078061: Need ability to heal mismatching user extended attributes without any changelogs
                          • 1078365: New xlators are linked as versioned .so files, creating .so.0.0.0
                          • 1086743: Add documentation for the Feature: RDMA-connection manager (RDMA-CM)
                          • 1086748: Add documentation for the Feature: AFR CLI enhancements
                          • 1086749: Add documentation for the Feature: Exposing Volume Capabilities
                          • 1086750: Add documentation for the Feature: File Snapshots in GlusterFS
                          • 1086751: Add documentation for the Feature: gfid-access
                          • 1086752: Add documentation for the Feature: On-Wire Compression/Decompression
                          • 1086754: Add documentation for the Feature: Quota Scalability
                          • 1086755: Add documentation for the Feature: readdir-ahead
                          • 1086756: Add documentation for the Feature: zerofill API for GlusterFS
                          • 1086758: Add documentation for the Feature: Changelog based parallel geo-replication
                          • 1086760: Add documentation for the Feature: Write Once Read Many (WORM) volume
                          • 1086762: Add documentation for the Feature: BD Xlator - Block Device translator
                          • 1086766: Add documentation for the Feature: Libgfapi
                          • 1086774: Add documentation for the Feature: Access Control List - Version 3 support for Gluster NFS
                          • 1086781: Add documentation for the Feature: Eager locking
                          • 1086782: Add documentation for the Feature: glusterfs and oVirt integration
                          • 1086783: Add documentation for the Feature: qemu 1.3 - libgfapi integration
                          • 1088848: Spelling errors in rpc/rpc-transport/rdma/src/rdma.c
                          • 1089054: gf-error-codes.h is missing from source tarball
                          • 1089470: SMB: Crash on brick process during compile kernel.
                          • 1089934: list dir with more than N files results in Input/output error
                          • 1091340: Doc: Add glfs_fini known issue to release notes 3.5
                          • 1091392: glusterfs.spec.in: minor/nit changes to sync with Fedora spec
                          • 1095256: Excessive logging from self-heal daemon, and bricks
                          • 1095595: Stick to IANA standard while allocating brick ports
                          • 1095775: Add support in libgfapi to fetch volume info from glusterd.
                          • 1095971: Stopping/Starting a Gluster volume resets ownership
                          • 1096040: AFR : self-heal-daemon not clearing the change-logs of all the sources after self-heal
                          • 1096425: i/o error when one user tries to access RHS volume over NFS with 100+ GIDs
                          • 1099878: Need support for handle based Ops to fetch/modify extended attributes of a file
                          • 1101647: gluster volume heal volname statistics heal-count not giving desired output.
                          • 1102306: license: xlators/features/glupy dual license GPLv2 and LGPLv3+
                          • 1103413: Failure in gf_log_init reopening stderr
                          • 1104592: heal info may give Success instead of transport end point not connected when a brick is down.
                          • 1104915: glusterfsd crashes while doing stress tests
                          • 1104919: Fix memory leaks in gfid-access xlator.
                          • 1104959: Dist-geo-rep : some of the files not accessible on slave after the geo-rep sync from master to slave.
                          • 1105188: Two instances each, of brick processes, glusterfs-nfs and quotad seen after glusterd restart
                          • 1105524: Disable nfs.drc by default
                          • 1107937: quota-anon-fd-nfs.t fails spuriously
• 1109832: I/O fails for glusterfs 3.4 AFR clients accessing servers upgraded to glusterfs 3.5
                          • 1110777: glusterfsd OOM - using all memory when quota is enabled
                          • "},{"location":"release-notes/3.5.1/#known-issues","title":"Known Issues:","text":"
                            • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly:

                              1. gluster volume set server.allow-insecure on
                              2. restarting the volume is necessary

gluster volume stop <volname>
gluster volume start <volname>
                              3. Edit /etc/glusterfs/glusterd.vol to contain this line:

option rpc-auth-allow-insecure on
                              4. restarting glusterd is necessary

service glusterd restart
                              5. More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

                                • For Block Device translator based volumes open-behind translator at the client side needs to be disabled.

• libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang, as reported by QEMU developers. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init. Follow Bug 1091335 to get informed when a release is made available that contains a final fix.

                                • After enabling server.manage-gids, the volume needs to be stopped and started again to have the option enabled in the brick processes

gluster volume stop <volname>
gluster volume start <volname>
                                "},{"location":"release-notes/3.5.2/","title":"3.5.2","text":""},{"location":"release-notes/3.5.2/#release-notes-for-glusterfs-352","title":"Release Notes for GlusterFS 3.5.2","text":"

                                This is mostly a bugfix release. The Release Notes for 3.5.0 and 3.5.1 contain a listing of all the new features that were added and bugs fixed.

                                "},{"location":"release-notes/3.5.2/#bugs-fixed","title":"Bugs Fixed:","text":"
                                • 1096020: NFS server crashes in _socket_read_vectored_request
                                • 1100050: Can't write to quota enable folder
                                • 1103050: nfs: reset command does not alter the result for nfs options earlier set
                                • 1105891: features/gfid-access: stat on .gfid virtual directory return EINVAL
                                • 1111454: creating symlinks generates errors on stripe volume
• 1112111: Self-heal errors with "afr crawl failed for child 0 with ret -1" while performing rolling upgrade.
                                • 1112348: [AFR] I/O fails when one of the replica nodes go down
                                • 1112659: Fix inode leaks in gfid-access xlator
                                • 1112980: NFS subdir authentication doesn't correctly handle multi-(homed,protocol,etc) network addresses
                                • 1113007: nfs-utils should be installed as dependency while installing glusterfs-server
                                • 1113403: Excessive logging in quotad.log of the kind 'null client'
                                • 1113749: client_t clienttable cliententries are never expanded when all entries are used
                                • 1113894: AFR : self-heal of few files not happening when a AWS EC2 Instance is back online after a restart
                                • 1113959: Spec %post server does not wait for the old glusterd to exit
                                • 1114501: Dist-geo-rep : deletion of files on master, geo-rep fails to propagate to slaves.
• 1115369: Allow the usage of the wildcard character '*' to the options "nfs.rpc-auth-allow" and "nfs.rpc-auth-reject"
                                • 1115950: glfsheal: Improve the way in which we check the presence of replica volumes
                                • 1116672: Resource cleanup doesn't happen for clients on servers after disconnect
                                • 1116997: mounting a volume over NFS (TCP) with MOUNT over UDP fails
                                • 1117241: backport 'gluster volume status --xml' issues
                                • 1120151: Glustershd memory usage too high
                                • 1124728: SMB: CIFS mount fails with the latest glusterfs rpm's
                                "},{"location":"release-notes/3.5.2/#known-issues","title":"Known Issues:","text":"
                                • The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

                                • gluster volume set server.allow-insecure on

                                • restarting the volume is necessary

gluster volume stop <volname>
gluster volume start <volname>

                                • Edit /etc/glusterfs/glusterd.vol to contain this line:

                                  option rpc-auth-allow-insecure on

                                • restarting glusterd is necessary

                                  service glusterd restart

                                • More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

                                  • For Block Device translator based volumes open-behind translator at the client side needs to be disabled.

gluster volume set <volname> performance.open-behind disabled
• libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang, as reported here. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init.

                                  • If the /var/run/gluster directory does not exist enabling quota will likely fail (Bug 1117888).

                                  "},{"location":"release-notes/3.5.3/","title":"3.5.3","text":""},{"location":"release-notes/3.5.3/#release-notes-for-glusterfs-353","title":"Release Notes for GlusterFS 3.5.3","text":"

                                  This is a bugfix release. The Release Notes for 3.5.0, 3.5.1 and 3.5.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.5 stable release.

                                  "},{"location":"release-notes/3.5.3/#bugs-fixed","title":"Bugs Fixed:","text":"
                                  • 1081016: glusterd needs xfsprogs and e2fsprogs packages
                                  • 1100204: brick failure detection does not work for ext4 filesystems
                                  • 1126801: glusterfs logrotate config file pollutes global config
                                  • 1129527: DHT :- data loss - file is missing on renaming same file from multiple client at same time
• 1129541: [DHT:REBALANCE]: Rebalance failures are seen with error message "remote operation failed: File exists"
                                  • 1132391: NFS interoperability problem: stripe-xlator removes EOF at end of READDIR
                                  • 1133949: Minor typo in afr logging
                                  • 1136221: The memories are exhausted quickly when handle the message which has multi fragments in a single record
                                  • 1136835: crash on fsync
                                  • 1138922: DHT + rebalance : rebalance process crashed + data loss + few Directories are present on sub-volumes but not visible on mount point + lookup is not healing directories
                                  • 1139103: DHT + Snapshot :- If snapshot is taken when Directory is created only on hashed sub-vol; On restoring that snapshot Directory is not listed on mount point and lookup on parent is not healing
                                  • 1139170: DHT :- rm -rf is not removing stale link file and because of that unable to create file having same name as stale link file
                                  • 1139245: vdsm invoked oom-killer during rebalance and Killed process 4305, UID 0, (glusterfs nfs process)
                                  • 1140338: rebalance is not resulting in the hash layout changes being available to nfs client
                                  • 1140348: Renaming file while rebalance is in progress causes data loss
                                  • 1140549: DHT: Rebalance process crash after add-brick and `rebalance start' operation
                                  • 1140556: Core: client crash while doing rename operations on the mount
• 1141558: AFR : "gluster volume heal info" prints some random characters
                                  • 1141733: data loss when rebalance + renames are in progress and bricks from replica pairs goes down and comes back
                                  • 1142052: Very high memory usage during rebalance
                                  • 1142614: files with open fd's getting into split-brain when bricks goes offline and comes back online
                                  • 1144315: core: all brick processes crash when quota is enabled
                                  • 1145000: Spec %post server does not wait for the old glusterd to exit
                                  • 1147156: AFR client segmentation fault in afr_priv_destroy
• 1147243: nfs: volume set help says the rmtab file is in "/var/lib/glusterd/rmtab"
                                  • 1149857: Option transport.socket.bind-address ignored
                                  • 1153626: Sizeof bug for allocation of memory in afr_lookup
• 1153629: AFR : excessive logging of "Non blocking entrylks failed" in glfsheal log file.
                                  • 1153900: Enabling Quota on existing data won't create pgfid xattrs
                                  • 1153904: self heal info logs are filled with messages reporting ENOENT while self-heal is going on
                                  • 1155073: Excessive logging in the self-heal daemon after a replace-brick
                                  • 1157661: GlusterFS allows insecure SSL modes
                                  • "},{"location":"release-notes/3.5.3/#known-issues","title":"Known Issues:","text":"
                                    • The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

                                    • gluster volume set server.allow-insecure on

                                    • restarting the volume is necessary

gluster volume stop <volname>
gluster volume start <volname>

                                    • Edit /etc/glusterfs/glusterd.vol to contain this line:

                                      option rpc-auth-allow-insecure on

                                    • restarting glusterd is necessary

                                      service glusterd restart

                                    • More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

                                      • For Block Device translator based volumes open-behind translator at the client side needs to be disabled.

gluster volume set <volname> performance.open-behind disabled
                                      • libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang as reported here. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init. This is being tracked in Bug 1134050 for glusterfs-3.5 and Bug 1093594 for mainline.

                                      • If the /var/run/gluster directory does not exist enabling quota will likely fail (Bug 1117888).

                                      "},{"location":"release-notes/3.5.4/","title":"3.5.4","text":""},{"location":"release-notes/3.5.4/#release-notes-for-glusterfs-354","title":"Release Notes for GlusterFS 3.5.4","text":"

                                      This is a bugfix release. The Release Notes for 3.5.0, 3.5.1, 3.5.2 and 3.5.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.5 stable release.

                                      "},{"location":"release-notes/3.5.4/#bugs-fixed","title":"Bugs Fixed:","text":"
                                      • 1092037: Issues reported by Cppcheck static analysis tool
                                      • 1101138: meta-data split-brain prevents entry/data self-heal of dir/file respectively
                                      • 1115197: Directory quota does not apply on it's sub-directories
                                      • 1159968: glusterfs.spec.in: deprecate *.logrotate files in dist-git in favor of the upstream logrotate files
                                      • 1160711: libgfapi: use versioned symbols in libgfapi.so for compatibility
                                      • 1161102: self heal info logs are filled up with messages reporting split-brain
                                      • 1162150: AFR gives EROFS when fop fails on all subvolumes when client-quorum is enabled
                                      • 1162226: bulk remove xattr should not fail if removexattr fails with ENOATTR/ENODATA
                                      • 1162230: quota xattrs are exposed in lookup and getxattr
                                      • 1162767: DHT: Rebalance- Rebalance process crash after remove-brick
                                      • 1166275: Directory fd leaks in index translator
                                      • 1168173: Regression tests fail in quota-anon-fs-nfs.t
                                      • 1173515: [HC] - mount.glusterfs fails to check return of mount command.
                                      • 1174250: Glusterfs outputs a lot of warnings and errors when quota is enabled
                                      • 1177339: entry self-heal in 3.5 and 3.6 are not compatible
                                      • 1177928: Directories not visible anymore after add-brick, new brick dirs not part of old bricks
                                      • 1184528: Some newly created folders have root ownership although created by unprivileged user
• 1186121: tar on a gluster directory gives message "file changed as we read it" even though no updates to file in progress
• 1190633: self-heal-algorithm with option "full" doesn't heal sparse files correctly
                                      • 1191006: Building argp-standalone breaks nightly builds on Fedora Rawhide
                                      • 1192832: log files get flooded when removexattr() can't find a specified key or value
                                      • 1200764: [AFR] Core dump and crash observed during disk replacement case
                                      • 1202675: Perf: readdirp in replicated volumes causes performance degrade
                                      • 1211841: glusterfs-api.pc versioning breaks QEMU
                                      • 1222150: readdirp return 64bits inodes even if enable-ino32 is set
                                      "},{"location":"release-notes/3.5.4/#known-issues","title":"Known Issues:","text":"
                                      • The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

                                      • gluster volume set server.allow-insecure on

                                      • restarting the volume is necessary

gluster volume stop <volname>
gluster volume start <volname>

                                      • Edit /etc/glusterfs/glusterd.vol to contain this line:

                                        option rpc-auth-allow-insecure on

                                      • restarting glusterd is necessary

                                        service glusterd restart

                                      • More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

                                        • For Block Device translator based volumes open-behind translator at the client side needs to be disabled.

gluster volume set <volname> performance.open-behind disabled
                                        • libgfapi clients calling glfs_fini before a successful glfs_init will cause the client to hang as reported here. The workaround is NOT to call glfs_fini for error cases encountered before a successful glfs_init. This is being tracked in Bug 1134050 for glusterfs-3.5 and Bug 1093594 for mainline.

                                        • If the /var/run/gluster directory does not exist enabling quota will likely fail (Bug 1117888).

                                        "},{"location":"release-notes/3.6.0/","title":"3.6.0","text":""},{"location":"release-notes/3.6.0/#major-changes-and-features","title":"Major Changes and Features","text":"

                                        Documentation about major changes and features is also included in the doc/features/ directory of GlusterFS repository.

                                        "},{"location":"release-notes/3.6.0/#volume-snapshot","title":"Volume Snapshot","text":"

                                        Volume snapshot provides a point-in-time copy of a GlusterFS volume. The snapshot is an online operation and hence filesystem data continues to be available for the clients while the snapshot is being taken.

                                        For more information refer here.
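As an illustrative sketch (the snapshot and volume names below are placeholders, not taken from these notes), a snapshot can typically be created and inspected with:

# take an online, point-in-time snapshot of the volume
gluster snapshot create snap1 <volname>
# list snapshots and show details of one of them
gluster snapshot list
gluster snapshot info snap1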

                                        "},{"location":"release-notes/3.6.0/#user-serviceable-snapshots","title":"User Serviceable Snapshots","text":"

                                        User Serviceable Snapshots provides the ability for users to access snapshots of GlusterFS volumes without administrative intervention.

                                        For more information refer here.
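A minimal sketch, assuming the features.uss volume option (treat the exact option name as an assumption rather than a quote from these notes); once enabled, snapshots become browsable through a virtual .snaps directory on the mount:

# expose snapshots to users without administrative intervention
gluster volume set <volname> features.uss enable
# users can then browse snapshot contents from the client mount
ls /mnt/<volname>/.snaps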

                                        "},{"location":"release-notes/3.6.0/#erasure-coding","title":"Erasure Coding","text":"

                                        The new disperse translator provides the ability to perform erasure coding across nodes.

                                        For more information refer here.
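A hedged example of creating a dispersed volume (the hosts and brick paths are placeholders); here one brick worth of space per set is used for redundancy:

# 3 bricks per set, 1 of which holds erasure-coded redundancy
gluster volume create <volname> disperse 3 redundancy 1 \
    server1:/bricks/b1 server2:/bricks/b2 server3:/bricks/b3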

                                        "},{"location":"release-notes/3.6.0/#granular-locking-support-for-management-operations","title":"Granular locking support for management operations","text":"

                                        Glusterd now holds a volume lock to support parallel management operations on different volumes.

                                        "},{"location":"release-notes/3.6.0/#journaling-enhancements-changelog-xlator","title":"Journaling enhancements (changelog xlator)","text":"

Introduction of a history API to consume journal records persisted by the changelog translator. With this API, it is no longer required to perform an expensive filesystem crawl to identify changes. Geo-replication makes use of this (on [re]start), thereby optimizing remote replication for purges, hardlinks, etc.

                                        "},{"location":"release-notes/3.6.0/#better-support-for-bricks-with-heterogeneous-sizes","title":"Better Support for bricks with heterogeneous sizes","text":"

                                        Prior to 3.6, bricks with heterogeneous sizes were treated as equal regardless of size, and would have been assigned an equal share of files. From 3.6, assignment of files to bricks will take into account the sizes of the bricks.

                                        "},{"location":"release-notes/3.6.0/#improved-ssl-support","title":"Improved SSL support","text":"

                                        GlusterFS 3.6 provides better support to enable SSL on both management and data connections. This feature is currently being consumed by the GlusterFS native driver in OpenStack Manila.
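A minimal sketch of the data-path options (certificate and CA setup are prerequisites and are not shown here):

# encrypt client <-> brick connections for this volume
gluster volume set <volname> client.ssl on
gluster volume set <volname> server.ssl on

Management-plane encryption is enabled separately, typically by creating the /var/lib/glusterd/secure-access file on every node before starting glusterd.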

                                        "},{"location":"release-notes/3.6.0/#better-peer-identification","title":"Better peer identification","text":"

GlusterFS 3.6 improves peer identification. GlusterD will no longer complain when a mixture of FQDNs, shortnames and IP addresses is used. The changes made for this improvement have also laid the groundwork for improving multi-network support in GlusterFS.

                                        "},{"location":"release-notes/3.6.0/#meta-translator","title":"Meta translator","text":"

                                        Meta translator provides a virtual interface for viewing internal state of translators.
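For instance, on a FUSE mount the meta translator exposes a virtual .meta directory; the paths below are illustrative and the available entries can vary by version:

# inspect the active client-side graph and basic process state
ls /mnt/<volname>/.meta/graphs/active
cat /mnt/<volname>/.meta/version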

                                        "},{"location":"release-notes/3.6.0/#improved-synchronous-replication-support-afrv2","title":"Improved synchronous replication support (AFRv2)","text":"

                                        The replication translator (AFR) in GlusterFS 3.6 has undergone a complete rewrite (http://review.gluster.org/#/c/6010/) and is referred to as AFRv2.

                                        From a user point of view, there is no change in the replication behaviour but there are some caveats to be noted from an admin point of view:

                                        • Lookups do not trigger meta-data and data self-heals anymore. They only trigger entry-self-heals. Data and meta-data are healed by the self-heal daemon only.

• Bricks in a replica set do not mark any pending changelog extended attributes for themselves during the pre-op or post-op phases. They only mark them for the other bricks in the replica set.

For example: in a replica 2 volume, trusted.afr.<volname>-client-0 on brick-0 and trusted.afr.<volname>-client-1 on brick-1 will always be 0x000000000000000000000000. These attributes can be inspected directly on the bricks, as shown after this list.

• If the post-op changelog update does not complete successfully on a brick, a trusted.afr.dirty extended attribute is set on that brick.
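These changelog attributes can be examined directly on a brick with getfattr; the brick path and file name below are placeholders:

# dump all extended attributes of a file as stored on one brick
getfattr -d -m . -e hex /bricks/brick-0/path/to/file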
                                        "},{"location":"release-notes/3.6.0/#barrier-translator","title":"Barrier translator","text":"

                                        The barrier translator allows file operations to be temporarily 'paused' on GlusterFS bricks, which is needed for performing consistent snapshots of a GlusterFS volume.

                                        For more information, see here.
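Barriering is normally triggered automatically by the snapshot infrastructure, but the CLI can toggle it as well; a hedged sketch:

# temporarily pause file operations on the bricks of a volume
gluster volume barrier <volname> enable
# resume normal operation
gluster volume barrier <volname> disable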

                                        "},{"location":"release-notes/3.6.0/#remove-brick-moves-data-by-default","title":"Remove brick moves data by default","text":"

Prior to 3.6, the volume remove-brick <volname> CLI would remove the brick from the volume without performing any data migration. The default behavior has now been changed to perform data migration when this command is issued. Removing a brick without data migration can still be performed through the volume remove-brick <volname> force interface.
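A sketch of both modes (the brick and volume names are placeholders):

# remove a brick with data migration (the default from 3.6 onwards)
gluster volume remove-brick <volname> server1:/bricks/b1 start
gluster volume remove-brick <volname> server1:/bricks/b1 status
gluster volume remove-brick <volname> server1:/bricks/b1 commit
# remove a brick without migrating its data
gluster volume remove-brick <volname> server1:/bricks/b1 force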

                                        "},{"location":"release-notes/3.6.0/#experimental-features","title":"Experimental Features","text":"

                                        The following features are experimental with this release:

• Support for RDMA volumes.
• Support for the NUFA translator.
• Disk encryption.
• On-wire compression + decompression [CDC].
                                        "},{"location":"release-notes/3.6.0/#porting-status","title":"Porting Status","text":"
• NetBSD and FreeBSD support is experimental, but regression tests suggest that it is close to being fully supported. Please make sure you use the latest NetBSD code from the -current or netbsd-7 branches.

                                        • OSX support is in an alpha state. More testing will help in maturing this support.

                                        "},{"location":"release-notes/3.6.0/#minor-improvements","title":"Minor Improvements:","text":"
                                        • Introduction of server.anonuid and server.anongid options for root squashing

• Root squashing does not happen for clients in the trusted storage pool

                                        • Memory accounting of glusterfs processes has been enabled by default

• The Gluster/NFS server now has support for setting access permissions on volumes with wildcard IP addresses and IP address/subnet values (CIDR notation). More details and examples are in the commit message; a brief sketch also follows this list.

                                        • More preparation for better integration with the nfs-ganesha user-space NFS-server. The changes are mostly related to the handle-based functions in libgfapi.so.

• A new logging framework that can suppress repetitive log messages and provide a dictionary of messages has been added. A few translators have already been integrated with the framework; more are expected to integrate with it in upcoming minor and major releases.
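As referenced in the Gluster/NFS item above, wildcard and CIDR based access control might look like this sketch (the addresses are placeholders):

# allow a subnet and a wildcard domain, reject one specific host
gluster volume set <volname> nfs.rpc-auth-allow "192.168.1.0/24,*.example.com"
gluster volume set <volname> nfs.rpc-auth-reject 10.0.0.5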

                                        "},{"location":"release-notes/3.6.0/#known-issues","title":"Known Issues:","text":"
                                        • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly:

                                        • gluster volume set <volname> server.allow-insecure on

                                        • Edit /etc/glusterfs/glusterd.vol to contain this line: option rpc-auth-allow-insecure on

Post 1, restarting the volume would be necessary:

# gluster volume stop <volname>
# gluster volume start <volname>

Post 2, restarting glusterd would be necessary:

# service glusterd restart

                                        • For Block Device translator based volumes open-behind translator at the client side needs to be disabled.

                                        • Renames happening on a file that is being migrated during rebalance will fail.

• Dispersed volumes do not work with the self-heal daemon. Self-healing is only activated when a damaged file or directory is accessed. Forcing a full self-heal, or replacing a brick, requires traversing the file system from a mount point. This is the recommended command to do so:

find <mount> -d -exec getfattr -h -n test {} \;
• Quota on dispersed volumes is not correctly computed, allowing users to store more data than specified. A workaround to this problem is to define a smaller quota based on this formula:

Q' = Q / (N - R)

                                        Where Q is the desired quota value, Q' is the new quota value to use, N is the number of bricks per disperse set, and R is the redundancy.
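For instance, with N = 6 bricks per disperse set, redundancy R = 2 and a desired quota Q of 100 GB, the value to configure would be Q' = 100 / (6 - 2) = 25 GB.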

                                        "},{"location":"release-notes/3.6.0/#upgrading-to-36x","title":"Upgrading to 3.6.X","text":"

Before upgrading to the 3.6 version of Gluster from 3.4.x or 3.5.x, please take a look at the following link: Upgrade Gluster to 3.6

                                        "},{"location":"release-notes/3.6.3/","title":"3.6.3","text":""},{"location":"release-notes/3.6.3/#release-notes-for-glusterfs-363","title":"Release Notes for GlusterFS 3.6.3","text":"

                                        This is a bugfix release. The Release Notes for 3.6.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 3.6 stable release.

                                        "},{"location":"release-notes/3.6.3/#bugs-fixed","title":"Bugs Fixed:","text":"
                                        • 1187526: Disperse volume mounted through NFS doesn't list any files/directories
                                        • 1188471: When the volume is in stopped state/all the bricks are down mount of the volume hangs
                                        • 1201484: glusterfs-3.6.2 fails to build on Ubuntu Precise: 'RDMA_OPTION_ID_REUSEADDR' undeclared
                                        • 1202212: Performance enhancement for RDMA
                                        • 1189023: Directories not visible anymore after add-brick, new brick dirs not part of old bricks
                                        • 1202673: Perf: readdirp in replicated volumes causes performance degrade
                                        • 1203081: Entries in indices/xattrop directory not removed appropriately
                                        • 1203648: Quota: Build ancestry in the lookup
                                        • 1199936: readv on /var/run/6b8f1f2526c6af8a87f1bb611ae5a86f.socket failed when NFS is disabled
                                        • 1200297: cli crashes when listing quota limits with xml output
                                        • 1201622: Convert quota size from n-to-h order before using it
                                        • 1194141: AFR : failure in self-heald.t
                                        • 1201624: Spurious failure of tests/bugs/quota/bug-1038598.t
                                        • 1194306: Do not count files which did not need index heal in the first place as successfully healed
• 1200258: Quota: features.quota-deem-statfs is "on" even after disabling quota.
                                        • 1165938: Fix regression test spurious failures
                                        • 1197598: NFS logs are filled with system.posix_acl_access messages
                                        • 1199577: mount.glusterfs uses /dev/stderr and fails if the device does not exist
                                        • 1188066: logging improvements in marker translator
                                        • 1191537: With afrv2 + ext4, lookups on directories with large offsets could result in duplicate/missing entries
                                        • 1165129: libgfapi: use versioned symbols in libgfapi.so for compatibility
                                        • 1179136: glusterd: Gluster rebalance status returns failure
                                        • 1176756: glusterd: remote locking failure when multiple synctask transactions are run
                                        • 1188064: log files get flooded when removexattr() can't find a specified key or value
                                        • 1192522: index heal doesn't continue crawl on self-heal failure
                                        • 1193970: Fix spurious ssl-authz.t regression failure (backport)
                                        • 1138897: NetBSD port
                                        • 1184527: Some newly created folders have root ownership although created by unprivileged user
                                        • 1181977: gluster vol clear-locks vol-name path kind all inode return IO error in a disperse volume
                                        • 1159471: rename operation leads to core dump
                                        • 1173528: Change in volume heal info command output
• 1186119: tar on a gluster directory gives message "file changed as we read it" even though no updates to file in progress
                                        • 1183716: Force replace-brick lead to the persistent write(use dd) return Input/output error
                                        • 1178590: Enable quota(default) leads to heal directory's xattr failed.
                                        • 1182490: Internal ec xattrs are allowed to be modified
• 1187547: self-heal-algorithm with option "full" doesn't heal sparse files correctly
                                        • 1174170: Glusterfs outputs a lot of warnings and errors when quota is enabled
• 1212684: GlusterD segfaults when started with management SSL
                                        "},{"location":"release-notes/3.6.3/#known-issues","title":"Known Issues:","text":"
                                        • The following configuration changes are necessary for 'qemu' and 'samba vfs plugin' integration with libgfapi to work seamlessly:

                                        • gluster volume set server.allow-insecure on

                                        • restarting the volume is necessary

gluster volume stop <volname>
gluster volume start <volname>

                                        • Edit /etc/glusterfs/glusterd.vol to contain this line:

                                          option rpc-auth-allow-insecure on

                                        • restarting glusterd is necessary

                                          service glusterd restart

                                        • More details are also documented in the Gluster Wiki on the Libgfapi with qemu libvirt page.

                                          • For Block Device translator based volumes open-behind translator at the client side needs to be disabled.
gluster volume set <volname> performance.open-behind disable
                                          "},{"location":"release-notes/3.7.0/","title":"3.7.0","text":"

                                          Release Notes for GlusterFS 3.7.0

                                          "},{"location":"release-notes/3.7.0/#major-changes-and-features","title":"Major Changes and Features","text":"

                                          Documentation about major changes and features is included in the doc/features/ directory of GlusterFS repository.

                                          "},{"location":"release-notes/3.7.0/#geo-replication","title":"Geo Replication","text":"

                                          Many improvements have gone in the geo replication. A detailed documentation about all the improvements can be found here

                                          "},{"location":"release-notes/3.7.0/#bitrot-detection","title":"Bitrot Detection","text":"

Bitrot detection is a technique used to identify an "insidious" type of disk error where data is silently corrupted with no indication from the disk to the storage software layer that an error has occurred. When bitrot detection is enabled on a volume, gluster performs signing of all files/objects in the volume and scrubs data periodically for signature verification. All anomalies observed will be noted in log files.

                                          For more information, refer here.
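A hedged sketch of enabling and tuning it from the CLI (the frequency value is only an example):

# enable signing and scrubbing on a volume
gluster volume bitrot <volname> enable
# scrub data once a day and check the scrubber status
gluster volume bitrot <volname> scrub-frequency daily
gluster volume bitrot <volname> scrub status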

                                          "},{"location":"release-notes/3.7.0/#multi-threaded-epoll-for-performance-improvements","title":"Multi threaded epoll for performance improvements","text":"

                                          Gluster 3.7 introduces multiple threads to dequeue and process more requests from epoll queues. This improves performance by processing more I/O requests. Workloads that involve read/write operations on a lot of small files can benefit from this enhancement.

                                          For more information refer here.
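The number of epoll worker threads is tunable per volume; a minimal sketch (the value 4 is only an example):

# raise the number of event threads on clients and bricks
gluster volume set <volname> client.event-threads 4
gluster volume set <volname> server.event-threads 4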

                                          "},{"location":"release-notes/3.7.0/#volume-tiering-experimental","title":"Volume Tiering [Experimental]","text":"

Policy-based tiering for placement of files. This feature will serve as a foundational piece for building support for data classification.

                                          For more information refer here.

                                          Volume Tiering is marked as an experimental feature for this release. It is expected to be fully supported in a 3.7.x minor release.
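A hedged sketch of attaching and later detaching a hot tier (the hosts and bricks are placeholders; the exact syntax evolved over the 3.7.x series):

# attach a replicated hot tier built from faster bricks
gluster volume attach-tier <volname> replica 2 ssd1:/bricks/hot1 ssd2:/bricks/hot2
# later, drain the hot tier and remove it
gluster volume detach-tier <volname> start
gluster volume detach-tier <volname> commit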

                                          "},{"location":"release-notes/3.7.0/#trashcan","title":"Trashcan","text":"

                                          This feature will enable administrators to temporarily store deleted files from Gluster volumes for a specified time period.

                                          For more information refer here.
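A minimal sketch, assuming the features.trash options (option names as commonly documented, not quoted from these notes):

# keep deleted/truncated files under a per-volume trash directory
gluster volume set <volname> features.trash on
gluster volume set <volname> features.trash-dir .trashcan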

                                          "},{"location":"release-notes/3.7.0/#efficient-object-count-and-inode-quota-support","title":"Efficient Object Count and Inode Quota Support","text":"

                                          This improvement enables an easy mechanism to retrieve the number of objects per directory or volume. Count of objects/files within a directory hierarchy is stored as an extended attribute of a directory. The extended attribute can be queried to retrieve the count.

                                          For more information refer here.

                                          This feature has been utilized to add support for inode quotas.

                                          For more details about inode quotas, refer here.
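A hedged example of using the object/inode quota support (the directory path and limit are placeholders):

# quota must be enabled on the volume first
gluster volume quota <volname> enable
# cap the number of files/objects under a directory and review it
gluster volume quota <volname> limit-objects /projects 10000
gluster volume quota <volname> list-objects /projects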

                                          "},{"location":"release-notes/3.7.0/#pro-active-self-healing-for-erasure-coding","title":"Pro-active Self healing for Erasure Coding","text":"

                                          Gluster 3.7 adds pro-active self healing support for erasure coded volumes.

                                          "},{"location":"release-notes/3.7.0/#exports-and-netgroups-authentication-for-nfs","title":"Exports and Netgroups Authentication for NFS","text":"

                                          This feature adds Linux-style exports & netgroups authentication to the native NFS server. This enables administrators to restrict access to specific clients & netgroups for volume/sub-directory NFSv3 exports.

                                          For more information refer here.

                                          "},{"location":"release-notes/3.7.0/#glusterfind","title":"GlusterFind","text":"

                                          GlusterFind is a new tool that provides a mechanism to monitor data events within a volume. Detection of events like modified files is made easier without having to traverse the entire volume.

                                          For more information refer here.
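A typical, illustrative session is sketched below; the session name, volume and output file are placeholders:

# create a change-detection session against a volume
glusterfind create sess1 <volname>
# emit the list of files changed since the previous run
glusterfind pre sess1 <volname> /tmp/changes.txt
# acknowledge the run so the next 'pre' starts from this point
glusterfind post sess1 <volname>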

                                          "},{"location":"release-notes/3.7.0/#rebalance-performance-improvements","title":"Rebalance Performance Improvements","text":"

                                          Rebalance and remove brick operations in Gluster get a performance boost by speeding up identification of files needing movement and a multi-threaded mechanism to move all such files.

                                          For more information refer here.

                                          "},{"location":"release-notes/3.7.0/#nfsv4-and-pnfs-support","title":"NFSv4 and pNFS support","text":"

                                          Gluster 3.7 supports export of volumes through NFSv4, NFSv4.1 and pNFS. This support is enabled via NFS Ganesha. Infrastructure changes done in Gluster 3.7 to support this feature include:

                                          • Addition of upcall infrastructure for cache invalidation.
                                          • Support for lease locks and delegations.
                                          • Support for enabling Ganesha through Gluster CLI.
                                          • Corosync and pacemaker based implementation providing resource monitoring and failover to accomplish NFS HA.

                                          For more information refer the below links:

                                          • NFS Ganesha Integration
                                          • Upcall Infrastructure
                                          • Gluster CLI for NFS Ganesha
                                          • High Availability for NFS Ganesha
                                          • pNFS support for Gluster

                                          pNFS support for Gluster volumes and NFSv4 delegations are in beta for this release. Infrastructure changes to support Lease locks and NFSv4 delegations are targeted for a 3.7.x minor release.

                                          "},{"location":"release-notes/3.7.0/#snapshot-scheduling","title":"Snapshot Scheduling","text":"

                                          With this enhancement, administrators can schedule volume snapshots.

                                          For more information, see here.
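Scheduling is handled by the snap_scheduler.py helper; a hedged sketch (the job name, cron-style schedule and volume are placeholders):

# one-time initialisation on each node, then enable the scheduler
snap_scheduler.py init
snap_scheduler.py enable
# snapshot <volname> every day at 01:00
snap_scheduler.py add "nightly" "0 1 * * *" <volname>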

                                          "},{"location":"release-notes/3.7.0/#snapshot-cloning","title":"Snapshot Cloning","text":"

                                          Volume snapshots can now be cloned to create a new writeable volume.

                                          For more information, see here.
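A minimal sketch (the clone and snapshot names are placeholders):

# create a new writable volume from an existing snapshot, then start it
gluster snapshot clone <clonename> <snapname>
gluster volume start <clonename>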

                                          "},{"location":"release-notes/3.7.0/#sharding-experimental","title":"Sharding [Experimental]","text":"

Sharding addresses the problem of fragmentation of space within a volume. This feature adds support for files that are larger than the size of an individual brick. Sharding works by chunking files into blobs of a configurable size.

                                          For more information, see here.

                                          Sharding is an experimental feature for this release. It is expected to be fully supported in a 3.7.x minor release.
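A hedged sketch of enabling it on a volume (the block size shown is only an example):

# split large files into fixed-size shards
gluster volume set <volname> features.shard on
gluster volume set <volname> features.shard-block-size 64MB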

                                          "},{"location":"release-notes/3.7.0/#rcu-in-glusterd","title":"RCU in glusterd","text":"

Thread synchronization and critical section access have been improved by introducing userspace RCU in glusterd.

                                          "},{"location":"release-notes/3.7.0/#arbiter-volumes","title":"Arbiter Volumes","text":"

Arbiter volumes are 3-way replicated volumes where the 3rd brick of the replica is automatically configured as an arbiter. The 3rd brick contains only metadata, which provides network partition tolerance and prevents split-brains from happening.

                                          For more information, see here.
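An arbiter volume is created with the replica/arbiter keywords; the hosts and bricks below are placeholders:

# the third brick stores only metadata and acts as the arbiter
gluster volume create <volname> replica 3 arbiter 1 \
    server1:/bricks/b1 server2:/bricks/b2 server3:/bricks/arb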

                                          "},{"location":"release-notes/3.7.0/#better-split-brain-resolution","title":"Better split-brain resolution","text":"

Split-brain resolution can now also be driven by users without administrative intervention.

                                          For more information, see the 'Resolution of split-brain from the mount point' section here.
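As a hedged illustration of the mount-point based workflow (the file path and client subvolume name are placeholders), a user can pick a source copy and finalise the heal with virtual setxattr calls:

# choose the copy on the first client subvolume for inspection
setfattr -n replica.split-brain-choice -v <volname>-client-0 /mnt/<volname>/path/to/file
# once satisfied, use that copy as the heal source
setfattr -n replica.split-brain-heal-finalize -v <volname>-client-0 /mnt/<volname>/path/to/file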

                                          "},{"location":"release-notes/3.7.0/#minor-improvements","title":"Minor Improvements","text":"
                                          • Message ID based logging has been added for several translators.
                                          • Quorum support for reads.
• Snapshot names contain timestamps by default. Subsequent access to the snapshots should be done by the name listed in gluster snapshot list.
                                          • Support for gluster volume get <volname> added.
                                          • libgfapi has added handle based functions to get/set POSIX ACLs based on common libacl structures.
                                          "},{"location":"release-notes/3.7.0/#known-issues","title":"Known Issues","text":"
                                          • Enabling Bitrot on volumes with more than 2 bricks on a node is known to cause problems.
                                          • Addition of bricks dynamically to cold or hot tiers in a tiered volume is not supported.
                                          • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly:

                                          # gluster volume set <volname> server.allow-insecure on

                                          Edit /etc/glusterfs/glusterd.vol to contain this line: option rpc-auth-allow-insecure on

                                          Post 1, restarting the volume would be necessary:

# gluster volume stop <volname>
# gluster volume start <volname>

                                          Post 2, restarting glusterd would be necessary:

                                          # service glusterd restart

                                          or

                                          # systemctl restart glusterd

                                          "},{"location":"release-notes/3.7.0/#upgrading-to-370","title":"Upgrading to 3.7.0","text":"

                                          Instructions for upgrading from previous versions of GlusterFS are maintained on this page.

                                          "},{"location":"release-notes/3.7.1/","title":"3.7.1","text":""},{"location":"release-notes/3.7.1/#release-notes-for-glusterfs-371","title":"Release Notes for GlusterFS 3.7.1","text":"

This is a bugfix release. The Release Notes for 3.7.0 contain a listing of all the new features that were added.

                                          Note: Enabling Bitrot on volumes with more than 2 bricks on a node works with this release.

                                          "},{"location":"release-notes/3.7.1/#bugs-fixed","title":"Bugs Fixed","text":"
                                          • 1212676: NetBSD port
                                          • 1218863: `ls' on a directory which has files with mismatching gfid's does not list anything
                                          • 1219782: Regression failures in tests/bugs/snapshot/bug-1112559.t
                                          • 1221000: detach-tier status emulates like detach-tier stop
                                          • 1221470: dHT rebalance: Dict_copy log messages when running rebalance on a dist-rep volume
                                          • 1221476: Data Tiering:rebalance fails on a tiered volume
                                          • 1221477: The tiering feature requires counters.
                                          • 1221503: DHT Rebalance : Misleading log messages for linkfiles
                                          • 1221507: NFS-Ganesha: ACL should not be enabled by default
                                          • 1221534: rebalance failed after attaching the tier to the volume.
                                          • 1221967: Do not allow detach-tier commands on a non tiered volume
                                          • 1221969: tiering: use sperate log/socket/pid file for tiering
                                          • 1222198: Fix nfs/mount3.c build warnings reported in Koji
                                          • 1222750: non-root geo-replication session goes to faulty state, when the session is started
                                          • 1222869: [SELinux] [BVT]: Selinux throws AVC errors while running DHT automation on Rhel6.6
                                          • 1223215: gluster volume status fails with locking failed error message
• 1223286: [geo-rep]: worker died with "ESTALE" when performed rm -rf on a directory from mount of master volume
                                          • 1223644: [geo-rep]: With tarssh the file is created at slave but it doesnt get sync
                                          • 1224100: [geo-rep]: Even after successful sync, the DATA counter did not reset to 0
                                          • 1224241: gfapi: zero size issue in glfs_h_acl_set()
                                          • 1224292: peers connected in the middle of a transaction are participating in the transaction
                                          • 1224647: [RFE] Provide hourly scrubbing option
                                          • 1224650: SIGNING FAILURE Error messages are poping up in the bitd log
                                          • 1224894: Quota: spurious failures with quota testcases
                                          • 1225077: Fix regression test spurious failures
• 1225279: Different client can not execute "for((i=0;i<1000;i++));do ls -al;done" in a same directory at the sametime
                                          • 1225318: glusterd could crash in remove-brick-status when local remove-brick process has just completed
                                          • 1225320: ls command failed with features.read-only on while mounting ec volume.
                                          • 1225331: [geo-rep] stop-all-gluster-processes.sh fails to stop all gluster processes
                                          • 1225543: [geo-rep]: snapshot creation timesout even if geo-replication is in pause/stop/delete state
                                          • 1225552: [Backup]: Unable to create a glusterfind session
                                          • 1225709: [RFE] Move signing trigger mechanism to [f]setxattr()
                                          • 1225743: [AFR-V2] - afr_final_errno() should treat op_ret > 0 also as success
                                          • 1225796: Spurious failure in tests/bugs/disperse/bug-1161621.t
                                          • 1225919: Log EEXIST errors in DEBUG level in fops MKNOD and MKDIR
                                          • 1225922: Sharding - Skip update of block count and size for directories in readdirp callback
                                          • 1226024: cli/tiering:typo errors in tiering
                                          • 1226029: I/O's hanging on tiered volumes (NFS)
                                          • 1226032: glusterd crashed on the node when tried to detach a tier after restoring data from the snapshot.
                                          • 1226117: [RFE] Return proper error codes in case of snapshot failure
                                          • 1226120: [Snapshot] Do not run scheduler if ovirt scheduler is running
                                          • 1226139: Implement MKNOD fop in bit-rot.
                                          • 1226146: BitRot :- bitd is not signing Objects if more than 3 bricks are present on same node
                                          • 1226153: Quota: Do not allow set/unset of quota limit in heterogeneous cluster
                                          • 1226629: bug-973073.t fails spuriously
                                          • 1226853: Volume start fails when glusterfs is source compiled with GCC v5.1.1
                                          "},{"location":"release-notes/3.7.1/#known-issues","title":"Known Issues","text":"
                                          • 1227677: Glusterd crashes and cannot start after rebalance
                                          • 1227656: Glusted dies when adding new brick to a distributed volume and converting to replicated volume
                                          • 1210256: gluster volume info --xml gives back incorrect typrStr in xml
• 1212842: tar on a glusterfs mount displays "file changed as we read it" even though the file was not changed
                                          • 1220347: Read operation on a file which is in split-brain condition is successful
                                          • 1213352: nfs-ganesha: HA issue, the iozone process is not moving ahead, once the nfs-ganesha is killed
                                          • 1220270: nfs-ganesha: Rename fails while exectuing Cthon general category test
                                          • 1214169: glusterfsd crashed while rebalance and self-heal were in progress
                                          • 1221941: glusterfsd: bricks crash while executing ls on nfs-ganesha vers=3
                                          • 1225809: [DHT-REBALANCE]-DataLoss: The data appended to a file during its migration will be lost once the migration is done
                                          • 1225940: DHT: lookup-unhashed feature breaks runtime compatibility with older client versions

                                          • Addition of bricks dynamically to cold or hot tiers in a tiered volume is not supported.

                                          • The following configuration changes are necessary for qemu and samba integration with libgfapi to work seamlessly:

# gluster volume set <volname> server.allow-insecure on

Edit /etc/glusterfs/glusterd.vol to contain this line: option rpc-auth-allow-insecure on

Post 1, restarting the volume would be necessary:

# gluster volume stop <volname>
# gluster volume start <volname>

Post 2, restarting glusterd would be necessary:

# service glusterd restart

or

# systemctl restart glusterd
                                          "},{"location":"release-notes/3.9.0/","title":"Release notes for Gluster 3.9.0","text":"

                                          This is a major release that includes a huge number of changes. Many improvements contribute to better support of Gluster with containers and running your storage on the same server as your hypervisors. Lots of work has been done to integrate with other projects that are part of the Open Source storage ecosystem.

The most notable features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

                                          "},{"location":"release-notes/3.9.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/3.9.0/#introducing-reset-brick-command","title":"Introducing reset-brick command","text":"

Notes for users: The reset-brick command provides support to reformat/replace the disk(s) represented by a brick within a volume. This is helpful when a disk goes bad, for example.

Start the reset process -

gluster volume reset-brick VOLNAME HOSTNAME:BRICKPATH start

                                          The above command kills the respective brick process. Now the brick can be reformatted.

To restart the brick after modifying its configuration -

gluster volume reset-brick VOLNAME HOSTNAME:BRICKPATH HOSTNAME:BRICKPATH commit

If the brick was killed in order to replace it with the same brick path, restart it with the following command -

gluster volume reset-brick VOLNAME HOSTNAME:BRICKPATH HOSTNAME:BRICKPATH commit force
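Putting the commands above together: assume a hypothetical volume named testvol whose brick node1:/bricks/brick1 sits on a failed disk that will be replaced on the same path.

# gluster volume reset-brick testvol node1:/bricks/brick1 start

Replace or reformat the disk behind /bricks/brick1 on node1 and remount it, then:

# gluster volume reset-brick testvol node1:/bricks/brick1 node1:/bricks/brick1 commit force

The commit force form is used because the brick returns on the same path; on a replicated volume, self-heal would then repopulate its data. The volume, host, and brick names here are illustrative only.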

                                          Limitations:

1. Resetting a brick kills the brick process in question. During this period the brick will not be available for I/O.
2. Replacing a brick with this command will work only if both brick paths are the same and belong to the same volume.
                                          "},{"location":"release-notes/3.9.0/#get-node-level-status-of-a-cluster","title":"Get node level status of a cluster","text":"

Notes for users: The get-state command provides node-level status of a trusted storage pool from the point of view of glusterd in a parseable format. Using the get-state command, external applications can invoke the command on all nodes of the cluster, and parse and collate the data obtained from all these nodes to get a complete picture of the state of the cluster.

# gluster get-state <glusterd> [odir <path/to/output/dir>] [file <filename>]

This would dump data points that reflect the local state representation of the cluster as maintained in glusterd (no other daemons are supported as of now) to a file inside the specified output directory. The default output directory and filename are /var/run/gluster and glusterdstate, respectively.
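For illustration, a hypothetical invocation that writes the state dump to a custom location (the directory and file name below are made-up) could be:

# gluster get-state glusterd odir /tmp/cluster-state/ file node1.state
# less /tmp/cluster-state/node1.state

Running this on every node and collating the resulting files is how an external application can build the cluster-wide picture.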

                                          Following are the sections in the output:

                                          1. Global: UUID and op-version of glusterd
                                          2. Global options: Displays cluster specific options that have been set explicitly through the volume set command.
                                          3. Peers: Displays the peer node information including its hostname and connection status
                                          4. Volumes: Displays the list of volumes created on this node along with detailed information on each volume.
                                          5. Services: Displays the list of the services configured on this node along with their corresponding statuses.

                                          Limitations:

1. This only supports glusterd.
2. It does not provide the complete cluster state; data must be collated from all nodes by an external application to get the complete cluster state.
                                          "},{"location":"release-notes/3.9.0/#multi-threaded-self-heal-for-disperse-volumes","title":"Multi threaded self-heal for Disperse volumes","text":"

                                          Notes for users: Users now have the ability to configure multi-threaded self-heal in disperse volumes using the following commands:

Option below can be used to control number of parallel heals in SHD
# gluster volume set <volname> disperse.shd-max-threads [1-64] # default is 1
Option below can be used to control number of heals that can wait in SHD
# gluster volume set <volname> disperse.shd-wait-qlength [1-65536] # default is 1024
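As a concrete (and purely illustrative) example, to allow four parallel heals and a longer wait queue on a hypothetical disperse volume named testvol:

# gluster volume set testvol disperse.shd-max-threads 4
# gluster volume set testvol disperse.shd-wait-qlength 2048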
                                          "},{"location":"release-notes/3.9.0/#hardware-extention-acceleration-in-disperse-volumes","title":"Hardware extention acceleration in Disperse volumes","text":"

Notes for users: If the user has hardware with special instructions that can be used in erasure code calculations on the client, it will be used automatically. At the moment this support is added for the cpu extensions: x64, sse, avx

                                          "},{"location":"release-notes/3.9.0/#lock-revocation-feature","title":"Lock revocation feature","text":"

                                          Notes for users:

1. Motivation: Prevents cluster instability caused by misbehaving clients making bricks OOM due to inode/entry lock pile-ups.
                                          2. Adds option to strip clients of entry/inode locks after N seconds
                                          3. Adds option to clear ALL locks should the revocation threshold get hit
                                          4. Adds option to clear all or granted locks should the max-blocked threshold get hit (can be used in combination w/ revocation-clear-all).
                                          5. Adds logging to indicate revocation event & reason
                                          6. Options are:
# gluster volume set <volname> features.locks-revocation-secs <integer; 0 to disable>
# gluster volume set <volname> features.locks-revocation-clear-all [on/off]
# gluster volume set <volname> features.locks-revocation-max-blocked <integer>
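For instance, a hypothetical tuning that revokes locks held longer than 120 seconds, clears all locks when the revocation threshold is hit, and caps blocked locks at 300 could be set as follows (the volume name and values are illustrative only):

# gluster volume set testvol features.locks-revocation-secs 120
# gluster volume set testvol features.locks-revocation-clear-all on
# gluster volume set testvol features.locks-revocation-max-blocked 300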
                                          "},{"location":"release-notes/3.9.0/#on-demand-scrubbing-for-bitrot-detection","title":"On demand scrubbing for Bitrot Detection:","text":"

Notes for users: With the 'ondemand' scrub option, you don't need to wait for the scrub-frequency to expire. As the option name itself says, the scrubber can be initiated on demand to detect corruption. If the scrubber is already running, this option is a no-op.

# gluster volume bitrot <volume-name> scrub ondemand
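For example, on a hypothetical volume named testvol, the scrub can be kicked off on demand and then observed with the existing scrub status command:

# gluster volume bitrot testvol scrub ondemand
# gluster volume bitrot testvol scrub status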
                                          "},{"location":"release-notes/3.9.0/#improvements-in-gluster-nfs-ganesha-integration","title":"Improvements in Gluster NFS-Ganesha integration","text":"

Notes for users: With this release, the major change is to store all the ganesha related configuration files in the shared storage volume mount point instead of keeping a separate local copy in the '/etc/ganesha' directory on each node.

For new users, before enabling nfs-ganesha:

1. Create a directory named nfs-ganesha in the shared storage mount point (/var/run/gluster/shared_storage/)

2. Create ganesha.conf & ganesha-ha.conf in that directory with the required details filled in.

For existing users, before starting the nfs-ganesha service, do the following:

1. Copy all the contents of the /etc/ganesha directory (including the .export_added file) to /var/run/gluster/shared_storage/nfs-ganesha from any of the ganesha nodes

2. Create a symlink to /var/run/gluster/shared_storage/nfs-ganesha/ganesha.conf at /etc/ganesha/ganesha.conf on each node in the ganesha cluster

3. Change the path for each export entry in the ganesha.conf file (a combined sketch of these steps follows the example below)

Example: if a volume \"test\" was exported, then ganesha.conf shall have the below export entry -
 %include \"/etc/ganesha/exports/export.test.conf\"
Change that line to
 %include \"/var/run/gluster/shared_storage/nfs-ganesha/exports/export.test.conf\"
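As referenced in step 3, the migration for existing users can be sketched roughly as below. This is a non-authoritative outline that assumes the shared storage is mounted at /var/run/gluster/shared_storage; the copy and the path rewrite are run once from one ganesha node, while the symlink is created on each node.

Copy the existing configuration (including the hidden .export_added file):
# cp -a /etc/ganesha/. /var/run/gluster/shared_storage/nfs-ganesha/
Create the symlink on each node:
# ln -sf /var/run/gluster/shared_storage/nfs-ganesha/ganesha.conf /etc/ganesha/ganesha.conf
Rewrite the export paths inside the shared ganesha.conf:
# sed -i 's|/etc/ganesha/exports/|/var/run/gluster/shared_storage/nfs-ganesha/exports/|' /var/run/gluster/shared_storage/nfs-ganesha/ganesha.conf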

In addition, the following changes have been made -

                                          • The entity \"HA_VOL_SERVER= \" in ganesha-ha.conf is no longer required.
                                          • A new resource-agent called portblock (available in >= resource-agents-3.9.5 package) is added to the cluster configuration to speed up the nfs-client connections post IP failover or failback. This may be noticed while looking at the cluster configuration status using the command pcs status.
                                          "},{"location":"release-notes/3.9.0/#availability-of-python-bindings-to-libgfapi","title":"Availability of python bindings to libgfapi","text":"

The official Python bindings for the GlusterFS libgfapi C library interface are mostly API complete. The complete API reference and documentation can be found at libgfapi-python.rtfd.io

The Python bindings have been packaged and made available on PyPI.

                                          "},{"location":"release-notes/3.9.0/#small-file-improvements-in-gluster-with-md-cache-experimental","title":"Small file improvements in Gluster with md-cache (Experimental)","text":"

Notes for users: With this release, the metadata cache on the client side is integrated with the cache-invalidation feature so that clients can cache longer without compromising consistency. By enabling the metadata cache and cache invalidation features and extending the cache timeout to 600s, we have seen performance improvements in metadata operations like creates, ls/stat, chmod, rename, and delete. The performance improvement is most significant for SMB access of a gluster volume, but as a cascading effect the improvement is also seen on FUSE/native access and NFS access.

Use the options below, in the order mentioned, to enable the features:

# gluster volume set <volname> features.cache-invalidation on
# gluster volume set <volname> features.cache-invalidation-timeout 600
# gluster volume set <volname> performance.stat-prefetch on
# gluster volume set <volname> performance.cache-invalidation on
# gluster volume set <volname> performance.cache-samba-metadata on     # Only for SMB access
# gluster volume set <volname> performance.md-cache-timeout 600
                                          "},{"location":"release-notes/3.9.0/#real-time-cluster-notifications-using-events-apis","title":"Real time Cluster notifications using Events APIs","text":"

Let us imagine we have a Gluster monitoring system which displays a list of volumes and their state. To show the realtime status, the monitoring app needs to query Gluster at regular intervals to check volume status, new volumes, and so on. If the polling interval is 5 seconds, then the monitoring app has to run the gluster volume info command ~17000 times a day!

With the Gluster 3.9 release, Gluster provides close to realtime notifications and alerts for Gluster cluster state changes. Webhooks can be registered to listen to Events emitted by Gluster. More details about this new feature are available here:

                                          http://docs.gluster.org/en/latest/Administrator%20Guide/Events%20APIs
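As a minimal sketch, assuming the gluster-eventsapi CLI that ships with the eventing feature and a made-up webhook endpoint, a monitoring application could register its listener and check the registration status like this:

# gluster-eventsapi webhook-add http://monitor.example.com:9000/listen
# gluster-eventsapi status

Gluster then pushes JSON events (volume and peer state changes, for example) to the registered URL, instead of the application polling volume info.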

                                          "},{"location":"release-notes/3.9.0/#geo-replication-improvements","title":"Geo-replication improvements","text":""},{"location":"release-notes/3.9.0/#documentation-improvements","title":"Documentation improvements:","text":"

Upstream documentation has been rewritten to reflect the latest version of Geo-replication, and stale/duplicate documentation has been removed. We are still working on adding Troubleshooting and Cluster expand/shrink notes to it. The latest version of the documentation is available here: http://docs.gluster.org/en/latest/Administrator%20Guide/Geo%20Replication

                                          "},{"location":"release-notes/3.9.0/#geo-replication-events-are-available-for-events-api-consumers","title":"Geo-replication Events are available for Events API consumers:","text":"

Events APIs is a new Gluster feature available with the 3.9 release; most of the events from Geo-replication have been added to eventsapi.

                                          Read more about the Events APIs and Geo-replication events here http://docs.gluster.org/en/latest/Administrator%20Guide/Events%20APIs

                                          "},{"location":"release-notes/3.9.0/#new-simplified-command-to-setup-non-root-geo-replication","title":"New simplified command to setup Non root Geo-replication","text":"

Non root Geo-replication setup was not easy, requiring multiple manual steps. The Non root Geo-replication steps are now simplified. Read more about the new steps in the Admin guide:

                                          http://docs.gluster.org/en/latest/Administrator%20Guide/Geo%20Replication/#slave-user-setup

                                          "},{"location":"release-notes/3.9.0/#new-command-to-generate-ssh-keysalternative-command-to-gsec_create","title":"New command to generate SSH keys(Alternative command to gsec_create)","text":"

The gluster system:: execute gsec_create command generates SSH keys on every Master cluster node and copies them to the initiating node. This command silently ignores errors if any node in the cluster is down, and it will not collect SSH keys from that node. When the Geo-rep create push-pem command is issued, it copies public keys only from those nodes which were up during gsec_create. This causes Geo-rep to go Faulty when that master node tries to make a connection to the slave nodes. With the new command, the output shows if any Master node was down while generating SSH keys. Read more about gluster-georep-sshkey:

                                          http://docs.gluster.org/en/latest/Administrator%20Guide/Geo%20Replication/#setting-up-the-environment-for-geo-replication
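A brief, non-authoritative sketch of the new workflow, assuming the gluster-georep-sshkey tool referenced above, is to generate the keys from any master node and review the per-node result it reports, so a down node is visible immediately:

# gluster-georep-sshkey generate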

                                          "},{"location":"release-notes/3.9.0/#logging-improvements","title":"Logging improvements","text":"

New logs have been added; now from the logs we can clearly understand what is going on. Note: This feature may change the logging format of existing log messages. Please update your parsers if they are used to parse Geo-rep logs.

                                          Patch: http://review.gluster.org/15710

                                          "},{"location":"release-notes/3.9.0/#new-configuration-options-available-changelog-log-level","title":"New Configuration options available: changelog-log-level","text":"

All the changelog related log messages are logged in /var/log/glusterfs/geo-replication/<SESSION>/*.changes.log on Master nodes. The log level was hard coded to TRACE for changelog logs. A new configuration option is provided to modify the changelog log level; it defaults to INFO.
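For example (the session names below are hypothetical), assuming changelog-log-level is accepted as a regular geo-replication session config key, it could be tuned through the usual config interface:

# gluster volume geo-replication mastervol slavehost::slavevol config changelog-log-level DEBUG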

                                          "},{"location":"release-notes/3.9.0/#behavior-changes","title":"Behavior changes","text":"
• #1221623: Earlier, the ports GlusterD allocated for daemons like brick processes, quotad, and shd were persistent through the volume's life cycle, so every restart of the process(es) or a node reboot would try to use the same ports that were allocated the first time. From release-3.9 onwards, GlusterD will try to allocate a fresh port once a daemon is restarted or the node is rebooted.
• #1348944: With the 3.9 release, the default log file for glusterd has been renamed from etc-glusterfs-glusterd.vol.log to glusterd.log
                                          "},{"location":"release-notes/3.9.0/#known-issues","title":"Known Issues","text":"
• #1387878: add-brick on a vm-store configuration which has sharding enabled leads to VM corruption. To work around this issue, one can scale up by creating more volumes until this issue is fixed.
                                          "},{"location":"release-notes/3.9.0/#bugs-addressed","title":"Bugs addressed","text":"

A total of 571 patches have been sent, addressing 422 bugs:

                                          • #762184: Support mandatory locking in glusterfs
                                          • #789278: Issues reported by Coverity static analysis tool
                                          • #1005257: [PATCH] Small typo fixes
                                          • #1175711: posix: Set correct d_type for readdirp() calls
                                          • #1193929: GlusterFS can be improved
                                          • #1198849: Minor improvements and cleanup for the build system
                                          • #1200914: pathinfo is wrong for striped replicated volumes
                                          • #1202274: Minor improvements and code cleanup for libgfapi
                                          • #1207604: [rfe] glusterfs snapshot cli commands should provide xml output.
                                          • #1211863: RFE: Support in md-cache to use upcall notifications to invalidate its cache
                                          • #1221623: glusterd: add brick command should re-use the port for listening which is freed by remove-brick.
                                          • #1222915: usage text is wrong for use-readdirp mount default
                                          • #1223937: Outdated autotools helper config.* files
                                          • #1225718: [FEAT] DHT - rebalance - rebalance status o/p should be different for 'fix-layout' option, it should not show 'Rebalanced-files' , 'Size', 'Scanned' etc as it is not migrating any files.
                                          • #1227667: Minor improvements and code cleanup for protocol server/client
                                          • #1228142: clang-analyzer: adding clang static analysis support
                                          • #1231224: Misleading error messages on brick logs while creating directory (mkdir) on fuse mount
                                          • #1236009: do an explicit lookup on the inodes linked in readdirp
                                          • #1254067: remove unused variables
                                          • #1266876: cluster/afr: AFR2 returns empty readdir results to clients if brick is added back into cluster after re-imaging/formatting
                                          • #1278325: DHT: Once remove brick start failed in between Remove brick commit should not be allowed
                                          • #1285152: store afr pending xattrs as a volume option
                                          • #1292020: quota: client gets IO error instead of disk quota exceed when the limit is exceeded
                                          • #1294813: [geo-rep]: Multiple geo-rep session to the same slave is allowed for different users
                                          • #1296043: Wrong usage of dict functions
                                          • #1302277: Wrong XML output for Volume Options
                                          • #1302948: tar complains: : file changed as we read it
                                          • #1303668: packaging: rpmlint warning and errors - Documentation URL 404
                                          • #1305031: AFR winds a few reads of a file in metadata split-brain.
                                          • #1306398: Tiering and AFR may result in data loss
                                          • #1311002: NFS+attach tier:IOs hang while attach tier is issued
                                          • #1311926: [georep]: If a georep session is recreated the existing files which are deleted from slave doesn't get sync again from master
                                          • #1315666: Data Tiering:tier volume status shows as in-progress on all nodes of a cluster even if the node is not part of volume
                                          • #1316178: changelog/rpc: Memory leak- rpc_clnt_t object is never freed
                                          • #1316389: georep: tests for logrotate, create+rename and hard-link rename
                                          • #1318204: Input / Output when chmoding files on NFS mount point
                                          • #1318289: [RFE] Add arbiter brick hotplug
                                          • #1318591: Glusterd not operational due to snapshot conflicting with nfs-ganesha export file in \"/var/lib/glusterd/snaps\"
                                          • #1319992: RFE: Lease support for gluster
                                          • #1320388: [GSS]-gluster v heal volname info does not work with enabled ssl/tls
                                          • #1321836: gluster volume info --xml returns 0 for nonexistent volume
                                          • #1322214: [HC] Add disk in a Hyper-converged environment fails when glusterfs is running in directIO mode
                                          • #1322805: [scale] Brick process does not start after node reboot
                                          • #1322825: IO-stats, client profile is overwritten when it is on the same node as bricks
                                          • #1324439: SAMBA+TIER : Wrong message display.On detach tier success the message reflects Tier command failed.
                                          • #1325831: gluster snap status xml output shows incorrect details when the snapshots are in deactivated state
                                          • #1326410: /var/lib/glusterd/$few-directories not owned by any package, causing it to remain after glusterfs-server is uninstalled
                                          • #1327171: Disperse: Provide description of disperse.eager-lock option.
                                          • #1328224: RFE : Feature: Automagic unsplit-brain policies for AFR
                                          • #1329211: values for Number of Scrubbed files, Number of Unsigned files, Last completed scrub time and Duration of last scrub are shown as zeros in bit rot scrub status
                                          • #1330032: rm -rf to a dir gives directory not empty(ENOTEMPTY) error
                                          • #1330097: ganesha exported volumes doesn't get synced up on shutdown node when it comes up.
                                          • #1330583: glusterfs-libs postun ldconfig: relative path `1' used to build cache
                                          • #1331254: Disperse volume fails on high load and logs show some assertion failures
                                          • #1331287: No xml output on gluster volume heal info command with --xml
                                          • #1331323: [Granular entry sh] - Implement renaming of indices in index translator
                                          • #1331423: distaf: Add io_libs to namespace package list
                                          • #1331720: implement meta-lock/unlock for lock migration
                                          • #1331721: distaf: Add README and HOWTO to distaflibs as well
                                          • #1331860: Wrong constant used in length based comparison for XATTR_SECURITY_PREFIX
                                          • #1331969: Ganesha+Tiering: Continuous \"0-glfs_h_poll_cache_invalidation: invalid argument\" messages getting logged in ganesha-gfapi logs.
                                          • #1332020: multiple regression failures for tests/basic/quota-ancestry-building.t
                                          • #1332021: multiple failures for testcase: tests/basic/inode-quota-enforcing.t
                                          • #1332054: multiple failures of tests/bugs/disperse/bug-1236065.t
                                          • #1332073: EINVAL errors while aggregating the directory size by quotad
                                          • #1332134: bitrot: Build generates Compilation Warning.
                                          • #1332136: Detach tier fire before the background fixlayout is complete may result in failure
                                          • #1332156: SMB:while running I/O on cifs mount and doing graph switch causes cifs mount to hang.
                                          • #1332219: tier: avoid pthread_join if pthread_create fails
                                          • #1332413: Wrong op-version for mandatory-locks volume set option
                                          • #1332419: geo-rep: address potential leak of memory
                                          • #1332460: [features/worm] - when disabled, worm xl should simply pass requested fops to its child xl
                                          • #1332465: glusterd + bitrot : Creating clone of snapshot. error \"xlator.c:148:xlator_volopt_dynload] 0-xlator: /usr/lib64/glusterfs/3.7.9/xlator/features/bitrot.so: cannot open shared object file:
                                          • #1332473: tests: 'tests/bitrot/br-state-check.t' fails in netbsd
                                          • #1332501: Mandatory locks are not migrated during lock migration
                                          • #1332566: [granular entry sh] - Add more tests
                                          • #1332798: [AFR]: \"volume heal info\" command is failing during in-service upgrade to latest.
                                          • #1332822: distaf: Add library functions for gluster snapshot operations
                                          • #1332885: distaf: Add library functions for gluster bitrot operations and generic library utility functions generic to all components
                                          • #1332952: distaf: Add library functions for gluster quota operations
                                          • #1332994: Self Heal fails on a replica3 volume with 'disk quota exceeded'
                                          • #1333023: readdir-ahead does not fetch xattrs that md-cache needs in it's internal calls
                                          • #1333043: Fix excessive logging due to NULL dict in dht
                                          • #1333263: [features/worm] Unwind FOPs with op_errno and add gf_worm prefix to functions
                                          • #1333317: rpc_clnt will sometimes not reconnect when using encryption
                                          • #1333319: Unexporting a volume sometimes fails with \"Dynamic export addition/deletion failed\".
                                          • #1333370: [FEAT] jbr-server handle lock/unlock fops
                                          • #1333738: distaf: Add GlusterBaseClass (gluster_base_class.py) to distaflibs-gluster.
                                          • #1333912: client ID should logged when SSL connection fails
                                          • #1333925: libglusterfs: race conditions and illegal mem access in timer
                                          • #1334044: [RFE] Eventing for Gluster
                                          • #1334164: Worker dies with [Errno 5] Input/output error upon creation of entries at slave
                                          • #1334208: distaf: Add library functions for gluster rebalance operations
                                          • #1334269: GlusterFS 3.8 fails to build in the CentOS Community Build System
                                          • #1334270: glusterd: glusterd provides stale port information when a volume is recreated with same brick path
                                          • #1334285: Under high read load, sometimes the message \"XDR decoding failed\" appears in the logs and read fails
                                          • #1334314: changelog: changelog_rollover breaks when number of fds opened is more than 1024
                                          • #1334444: SAMBA-VSS : Permission denied issue while restoring the directory from windows client 1 when files are deleted from windows client 2
                                          • #1334620: stop all gluster processes should also include glusterfs mount process
                                          • #1334621: set errno in case of inode_link failures
                                          • #1334721: distaf: Add library functions for gluster tiering operations
                                          • #1334839: [Tiering]: Files remain in hot tier even after detach tier completes
                                          • #1335019: Add graph for decompounder xlator
                                          • #1335091: mount/fuse: Logging improvements
                                          • #1335231: features/locks: clang compile warning in posix.c
                                          • #1335232: features/index: clang compile warnings in index.c
                                          • #1335429: Self heal shows different information for the same volume from each node
                                          • #1335494: Modifying peer ops library
                                          • #1335531: Modified volume options are not syncing once glusterd comes up.
                                          • #1335652: Heal info shows split-brain for .shard directory though only one brick was down
                                          • #1335717: PREFIX is not honoured during build and install
                                          • #1335776: rpc: change client insecure port ceiling from 65535 to 49151
                                          • #1335818: Revert \"features/shard: Make o-direct writes work with sharding: http://review.gluster.org/#/c/13846/\"
                                          • #1335858: Files present in the .shard folder even after deleting all the vms from the UI
                                          • #1335973: [Tiering]: The message 'Max cycle time reached..exiting migration' incorrectly displayed as an 'error' in the logs
                                          • #1336197: failover is not working with latest builds.
                                          • #1336328: [FEAT] jbr: Improve code modularity
                                          • #1336354: Provide a way to configure gluster source location in devel-vagrant
                                          • #1336373: Distaf: Add gluster specific config file
                                          • #1336381: ENOTCONN error during parallel rmdir
                                          • #1336508: rpc-transport: compiler warning format string
                                          • #1336612: one of vm goes to paused state when network goes down and comes up back
                                          • #1336630: ERROR and Warning message on writing a file from mount point \"null gfid for path (null)\" repeated 3 times between\"
                                          • #1336642: [RFE] git-branch-diff: wrapper script for git to visualize backports
                                          • #1336698: DHT : few Files are not accessible and not listed on mount + more than one Directory have same gfid + (sometimes) attributes has ?? in ls output after renaming Directories from multiple client at same time
                                          • #1336793: assorted typos and spelling mistakes from Debian lintian
                                          • #1336818: Add ability to set oom_score_adj for glusterfs process
                                          • #1336853: scripts: bash-isms in scripts
                                          • #1336945: [NFS-Ganesha] : stonith-enabled option not set with new versions of cman,pacemaker,corosync and pcs
                                          • #1337160: distaf: Added libraries to setup nfs-ganesha in gluster through distaf
                                          • #1337227: [tiering]: error message shown during the failure of detach tier commit isn't intuitive
                                          • #1337405: Some of VMs go to paused state when there is concurrent I/O on vms
                                          • #1337473: upgrade path when slave volume uuid used in geo-rep session
                                          • #1337597: Mounting a volume over NFS with a subdir followed by a / returns \"Invalid argument\"
                                          • #1337650: log flooded with Could not map name=xxxx to a UUID when config'd with long hostnames
                                          • #1337777: tests/bugs/write-behind/1279730.t fails spuriously
                                          • #1337791: tests/basic/afr/tarissue.t fails regression
                                          • #1337899: Misleading error message on rebalance start when one of the glusterd instance is down
                                          • #1338544: fuse: In fuse_first_lookup(), dict is not un-referenced in case create_frame returns an empty pointer.
                                          • #1338634: AFR : fuse,nfs mount hangs when directories with same names are created and deleted continuously
                                          • #1338733: __inode_ctx_put: fix mem leak on failure
                                          • #1338967: common-ha: ganesha.nfsd not put into NFS-GRACE after fail-back
                                          • #1338991: DHT2: Tracker bug
                                          • #1339071: dht/rebalance: mark hardlink migration failure as skipped for rebalance process
                                          • #1339149: Error and warning messages related to xlator/features/snapview-client.so adding up to the client log on performing IO operations
                                          • #1339166: distaf: Added timeout value to wait for rebalance to complete and removed older rebalance library file
                                          • #1339181: Full heal of a sub-directory does not clean up name-indices when granular-entry-heal is enabled.
                                          • #1339214: gfapi: set mem_acct for the variables created for upcall
                                          • #1339471: [geo-rep]: Worker died with [Errno 2] No such file or directory
                                          • #1339472: [geo-rep]: Monitor crashed with [Errno 3] No such process
                                          • #1339541: Added libraries to setup CTDB in gluster through distaf
                                          • #1339553: gfapi: in case of handle based APIs, close glfd after successful create
                                          • #1339689: RFE - capacity info (df -h on a mount) is incorrect for a tiered volume
                                          • #1340488: copy-export-ganesha.sh does not have a correct shebang
                                          • #1340623: Directory creation(mkdir) fails when the remove brick is initiated for replicated volumes accessing via nfs-ganesha
                                          • #1340853: [geo-rep]: If the session is renamed, geo-rep configuration are not retained
                                          • #1340936: Automount fails because /sbin/mount.glusterfs does not accept the -s option
                                          • #1341007: gfapi : throwing warning message for unused variable in glfs_h_find_handle()
                                          • #1341009: Log parameters such as the gfid, fd address, offset and length of the reads upon failure for easier debugging
                                          • #1341294: build: RHEL7 unpackaged files /var/lib/glusterd/hooks/.../S57glusterfind-delete-post.{pyc,pyo}
                                          • #1341474: [geo-rep]: Snapshot creation having geo-rep session is broken
                                          • #1341650: conservative merge happening on a x3 volume for a deleted file
                                          • #1341768: After setting up ganesha on RHEL 6, nodes remains in stopped state and grace related failures observed in pcs status
                                          • #1341796: [quota+snapshot]: Directories are inaccessible from activated snapshot, when the snapshot was created during directory creation
                                          • #1342171: O_DIRECT support for sharding
                                          • #1342259: [features/worm] - write FOP should pass for the normal files
                                          • #1342298: reading file with size less than 512 fails with odirect read
                                          • #1342356: [RFE] Python library for creating Cluster aware CLI tools for Gluster
                                          • #1342420: [georep]: Stopping volume fails if it has geo-rep session (Even in stopped state)
                                          • #1342796: self heal deamon killed due to oom kills on a dist-disperse volume using nfs ganesha
                                          • #1342979: [geo-rep]: Add-Brick use case: create push-pem force on existing geo-rep fails
                                          • #1343038: IO ERROR when multiple graph switches
                                          • #1343286: enabling glusternfs with nfs.rpc-auth-allow to many hosts failed
                                          • #1343333: [RFE] Simplify Non Root Geo-replication Setup
                                          • #1343374: Gluster fuse client crashed generating core dump
                                          • #1343838: Implement API to get page aligned iobufs in iobuf.c
                                          • #1343906: [Stress/Scale] : I/O errors out from gNFS mount points during high load on an erasure coded volume,Logs flooded with Error messages.
                                          • #1343943: Old documentation link in log during Geo-rep MISCONFIGURATION
                                          • #1344277: [disperse] mkdir after re balance give Input/Output Error
                                          • #1344340: Unsafe access to inode->fd_list
                                          • #1344396: fd leak in disperse
                                          • #1344407: fail delete volume operation if one of the glusterd instance is down in cluster
                                          • #1344686: tiering : Multiple brick processes crashed on tiered volume while taking snapshots
                                          • #1344714: removal of file from nfs mount crashs ganesha server
                                          • #1344836: [Disperse volume]: IO hang seen on mount with file ops
                                          • #1344885: inode leak in brick process
                                          • #1345727: Bricks are starting when server quorum not met.
                                          • #1345744: [geo-rep]: Worker crashed with \"KeyError: \"
                                          • #1345748: SAMBA-DHT : Crash seen while rename operations in cifs mount and windows access of share mount
                                          • #1345846: quota : rectify quota-deem-statfs default value in gluster v set help command
                                          • #1345855: Possible crash due to a timer cancellation race
                                          • #1346138: [RFE] Non root Geo-replication Error logs improvements
                                          • #1346211: cleanup glusterd-georep code
                                          • #1346551: wrong understanding of function's parameter
                                          • #1346719: [Disperse] dd + rm + ls lead to IO hang
                                          • #1346821: cli core dumped while providing/not wrong values during arbiter replica volume
                                          • #1347249: libgfapi : variables allocated by glfs_set_volfile_server is not freed
                                          • #1347354: glusterd: SuSE build system error for incorrect strcat, strncat usage
                                          • #1347686: IO error seen with Rolling or non-disruptive upgrade of an distribute-disperse(EC) volume from 3.7.5 to 3.7.9
                                          • #1348897: Add relative path validation for gluster copy file utility
                                          • #1348904: [geo-rep]: If the data is copied from .snaps directory to the master, it doesn't get sync to slave [First Copy]
                                          • #1348944: Change the glusterd log file name to glusterd.log
                                          • #1349270: ganesha.enable remains on in volume info file even after we disable nfs-ganesha on the cluster.
                                          • #1349273: Geo-rep silently ignores config parser errors
                                          • #1349276: Buffer overflow when attempting to create filesystem using libgfapi as driver on OpenStack
                                          • #1349284: [tiering]: Files of size greater than that of high watermark level should not be promoted
                                          • #1349398: nfs-ganesha disable doesn't delete nfs-ganesha folder from /var/run/gluster/shared_storage
                                          • #1349657: process glusterd set TCP_USER_TIMEOUT failed
                                          • #1349709: Polling failure errors getting when volume is started&stopped with SSL enabled setup.
                                          • #1349723: Added libraries to get server_brick dictionaries
                                          • #1350017: Change distaf glusterbase class and mount according to the config file changes
                                          • #1350168: distaf: made changes to create_volume function
                                          • #1350173: distaf: Adding samba_ops library
                                          • #1350188: distaf: minor import changes in ganesha.py
                                          • #1350191: race condition when set ctx->timer in function gf_timer_registry_init
                                          • #1350237: Gluster/NFS does not accept dashes in hostnames in exports/netgroups files
                                          • #1350245: distaf: Add library functions for gluster volume operations
                                          • #1350248: distaf: Modified get_pathinfo function in lib_utils.py
                                          • #1350256: Distaf: Modifying the ctdb_libs to get server host from the server dict
                                          • #1350258: Distaf: add a sample test case to the framework
                                          • #1350327: Protocol client not mounting volumes running on older versions.
                                          • #1350371: ganesha/glusterd : remove 'HA_VOL_SERVER' from ganesha-ha.conf
                                          • #1350383: distaf: Modified distaf gluster config file
                                          • #1350427: distaf: Modified tier_attach() to get bricks path for attaching tier from the available bricks in server
                                          • #1350744: GlusterFS 3.9.0 tracker
                                          • #1350793: build: remove absolute paths from glusterfs spec file
                                          • #1350867: RFE: FEATURE: Lock revocation for features/locks xlator
                                          • #1351021: [DHT]: Rebalance info for remove brick operation is not showing after glusterd restart
                                          • #1351071: [geo-rep] Stopped geo-rep session gets started automatically once all the master nodes are upgraded
                                          • #1351134: [SSL] : gluster v set help does not show ssl options
                                          • #1351537: [Bitrot] Need a way to set scrub interval to a minute, for ease of testing
                                          • #1351880: gluster volume status client\" isn't showing any information when one of the nodes in a 3-way Distributed-Replicate volume is shut down
                                          • #1352019: RFE: Move throttling code to libglusterfs from bitrot
                                          • #1352277: a two node glusterfs seems not possible anymore?!
                                          • #1352279: [scale]: Bricks not started after node reboot.
                                          • #1352423: should find_library(\"c\") be used instead of find_library(\"libc\") in geo-replication/syncdaemon/libcxattr.py?
                                          • #1352634: qemu libgfapi clients hang when doing I/O
                                          • #1352671: RFE: As a part of xattr invalidation, send the stat info as well
                                          • #1352854: GlusterFS - Memory Leak - High Memory Utilization
                                          • #1352871: [Bitrot]: Scrub status- Certain fields continue to show previous run's details, even if the current run is in progress
                                          • #1353156: [RFE] CLI to get local state representation for a cluster
                                          • #1354141: several problems found in failure handle logic
                                          • #1354221: noisy compilation warnning with Wstrict-prototypes
                                          • #1354372: Fix timing issue in tests/bugs/glusterd/bug-963541.t
                                          • #1354439: nfs client I/O stuck post IP failover
                                          • #1354489: service file is executable
                                          • #1355604: afr coverity fixes
                                          • #1355628: Upgrade from 3.7.8 to 3.8.1 doesn't regenerate the volfiles
                                          • #1355706: [Bitrot]: Sticky bit files considered and skipped by the scrubber, instead of getting ignored.
                                          • #1355956: RFE : move ganesha related configuration into shared storage
                                          • #1356032: quota: correct spelling mistakes in quota src files
                                          • #1356068: observing \" Too many levels of symbolic links\" after adding bricks and then issuing a replace brick
                                          • #1356504: Move gf_log->gf_msg in index feature
                                          • #1356508: [RFE] Handle errors during SSH key generation(gsec_create)
                                          • #1356528: memory leak in glusterd-georeplication
                                          • #1356851: [Bitrot+Sharding] Scrub status shows incorrect values for 'files scrubbed' and 'files skipped'
                                          • #1356868: File not found errors during rpmbuild: /var/lib/glusterd/hooks/1/delete/post/S57glusterfind-delete-post.py{c,o}
                                          • #1356888: Correct code in socket.c to avoid fd leak
                                          • #1356998: syscalls: readdir_r() is deprecated in newer glibc
                                          • #1357210: add several fops support in io-threads
                                          • #1357226: add a basis function to reduce verbose code
                                          • #1357397: Trash translator fails to create 'internal_op' directory under already existing trash directory
                                          • #1357463: Error: quota context not set inode (gfid:nnn) [Invalid argument]
                                          • #1357490: libglusterfs : update correct memory segments in glfs-message-id
                                          • #1357821: Make install fails second time without uninstall
                                          • #1358114: tests: ./tests/bitrot/br-stub.t fails intermittently
                                          • #1358195: Fix spurious failure of tests/bugs/glusterd/bug-1111041.t
                                          • #1358196: Tiering related core observed with \"uuid_is_null () message\".
                                          • #1358244: [SNAPSHOT]: The PID for snapd is displayed even after snapd process is killed.
                                          • #1358594: Enable gfapi test cases in Gluster upstream regression
                                          • #1358608: Memory leak observed with upcall polling
                                          • #1358671: Add Events for Volume Set and Reset
                                          • #1358922: missunderstanding about GF_PROTOCOL_DICT_SERIALIZE
                                          • #1358936: coverity: iobuf_get_page_aligned calling iobuf_get2 should check the return pointer
                                          • #1358944: jbr resource leak, forget free \"path\"
                                          • #1358976: Fix spurious failures in split-brain-favorite-child-policy.t
                                          • #1359001: Fix spurious failures in ec.t
                                          • #1359190: Glusterd crashes upon receiving SIGUSR1
                                          • #1359370: glfs: fix glfs_set_volfile_server doc
                                          • #1359711: [GSS] Rebalance crashed
                                          • #1359717: Fix failure of ./tests/bugs/snapshot/bug-1316437.t
                                          • #1360169: Fix bugs in compound fops framework
                                          • #1360401: RFE: support multiple bricks within one process
                                          • #1360402: Clients can starve under heavy load
                                          • #1360647: gfapi: deprecate the rdma support for management connections
                                          • #1360670: Add output option --xml to man page of gluster
                                          • #1360679: Bricks doesn't come online after reboot [ Brick Full ]
                                          • #1360682: tests: ./tests/bitrot/bug-1244613.t fails intermittently
                                          • #1360693: [RFE] Add a count of snapshots associated with a volume to the output of the vol info command
                                          • #1360809: [RFE] Capture events in GlusterD
                                          • #1361094: Auto generate header files during Make
                                          • #1361249: posix: leverage FALLOC_FL_ZERO_RANGE in zerofill fop
                                          • #1361300: Direct io to sharded files fails when on zfs backend
                                          • #1361678: thread CPU saturation limiting throughput on write workloads
                                          • #1361983: Move USE_EVENTS in gf_events API
                                          • #1361999: Remove ganesha xlator code from gluster code base
                                          • #1362144: Python library to send Events
                                          • #1362151: [libgfchangelog]: If changelogs are not available for the requested time range, no proper error message
                                          • #1362397: Mem leak in meta_default_readv in meta xlators
                                          • #1362520: Per xlator logging not working
                                          • #1362602: [Open SSL] : Unable to mount an SSL enabled volume via SMB v3/Ganesha v4
                                          • #1363591: Geo-replication user driven Events
                                          • #1363721: [HC]: After bringing down and up of the bricks VM's are getting paused
                                          • #1363948: Spurious failure in tests/bugs/glusterd/bug-1089668.t
                                          • #1364026: glfs_fini() crashes with SIGSEGV
                                          • #1364420: [RFE] History Crawl performance improvement
                                          • #1364449: posix: honour fsync flags in posix_do_zerofill
                                          • #1364529: api: revert glfs_ipc_xd intended for 4.0
                                          • #1365455: [AFR]: Files not available in the mount point after converting Distributed volume type to Replicated one.
                                          • #1365489: glfs_truncate missing
                                          • #1365506: gfapi: use const qualifier for glfs_*timens()
                                          • #1366195: [Bitrot - RFE]: On demand scrubbing option to scrub
                                          • #1366222: \"heal info --xml\" not showing the brick name of offline bricks.
                                          • #1366226: Move alloca0 definition to common-utils
                                          • #1366284: fix bug in protocol/client lookup callback
                                          • #1367258: Log EEXIST errors at DEBUG level
                                          • #1367478: Second gluster volume is offline after daemon restart or server reboot
                                          • #1367527: core: use for makedev(3), major(3), minor(3)
                                          • #1367665: rotated FUSE mount log is using to populate the information after log rotate.
                                          • #1367771: Introduce graceful mode in stop-all-gluster-processes.sh
                                          • #1367774: Support for Client side Events
                                          • #1367815: [Bitrot - RFE]: Bitrot Events
                                          • #1368042: make fails if Events APIs are disabled
                                          • #1368349: tests/bugs/cli/bug-1320388.t: Infrequent failures
                                          • #1368451: [RFE] Implement multi threaded self-heal for ec volumes
                                          • #1368842: Applications not calling glfs_h_poll_upcall() have upcall events cached for no use
                                          • #1368882: log level set in glfs_set_logging() does not work
                                          • #1368931: [ RFE] Quota Events
                                          • #1368953: spurious netbsd run failures in tests/basic/glusterd/volfile_server_switch.t
                                          • #1369124: fix unused variable warnings from out-of-tree builds generate XDR headers and source files i...
                                          • #1369331: Memory leak with a replica 3 arbiter 1 configuration
                                          • #1369401: NetBSD hangs at /tests/features/lock_revocation.t
                                          • #1369430: Track the client that performed readdirp
                                          • #1369432: IATT cache invalidation should be sent when permission changes on file
                                          • #1369524: segment fault while join thread reaper_thr in fini()
                                          • #1369530: protocol/server: readlink rsp xdr failed while readlink got an error
                                          • #1369638: DHT stale layout issue will be seen often with md-cache prolonged cache of lookups
                                          • #1369721: EventApis will not work if compiled using ./configure --disable-glupy
                                          • #1370053: fix EXPECT_WITHIN
                                          • #1370074: Fix mistakes in self-heald.t
                                          • #1370406: build: eventtypes.h is missing
                                          • #1370445: Geo-replication server side events
                                          • #1370862: dht: fix the broken build
                                          • #1371541: Spurious regressions in ./tests/bugs/gfapi/bug-1093594.t
                                          • #1371543: Add cache invalidation stat in profile info
                                          • #1371775: gluster system:: uuid get hangs
                                          • #1372278: [RFE] Provide snapshot events for the new eventing framework
                                          • #1372586: Fix the test case http://review.gluster.org/#/c/15385/
                                          • #1372686: [RFE]Reducing number of network round trips
                                           • #1373529: Node remains in stopped state in pcs status with "/usr/lib/ocf/resource.d/heartbeat/ganesha_mon: line 137: [: too many arguments ]" messages in logs.
                                          • #1373735: Event pushed even if Answer is No in the Volume Stop and Delete prompt
                                          • #1373740: [RFE]: events from protocol server
                                          • #1373743: [RFE]: AFR events
                                          • #1374153: [RFE] History Crawl performance improvement
                                          • #1374167: disperse: Integrate important events with events framework
                                          • #1374278: rpc/xdr: generated files are filtered with a sed extended regex
                                           • #1374298: "gluster vol status all clients --xml" doesn't generate xml if there is a failure in between
                                          • #1374324: [RFE] Tier Events
                                          • #1374567: [Bitrot]: Recovery fails of a corrupted hardlink (and the corresponding parent file) in a disperse volume
                                          • #1374581: Geo-rep worker Faulty with OSError: [Errno 21] Is a directory
                                          • #1374597: [geo-rep]: AttributeError: 'Popen' object has no attribute 'elines'
                                          • #1374608: geo-replication *changes.log does not respect the log-level configured
                                          • #1374626: Worker crashes with EINVAL errors
                                          • #1374630: [geo-replication]: geo-rep Status is not showing bricks from one of the nodes
                                          • #1374639: glusterfs: create a directory with 0464 mode return EIO error
                                          • #1374649: Support for rc.d and init for Service management
                                          • #1374841: Implement SIMD support on EC
                                          • #1375042: bug-963541.t spurious failure
                                          • #1375537: gf_event python fails with ImportError
                                          • #1375543: [geo-rep]: defunct tar process while using tar+ssh sync
                                          • #1375570: Detach tier commit is allowed when detach tier start goes into failed state
                                          • #1375914: posix: Integrate important events with events framework
                                          • #1376331: Rpm installation fails with conflicts error for eventsconfig.json file
                                          • #1376396: /var/tmp/rpm-tmp.KPCugR: line 2: /bin/systemctl: No such file or directory
                                          • #1376477: [RFE] DHT Events
                                          • #1376874: RFE : move ganesha related configuration into shared storage
                                          • #1377288: The GlusterFS Callback RPC-calls always use RPC/XID 42
                                          • #1377386: glusterd experiencing repeated connect/disconnect messages when shd is down
                                          • #1377570: EC: Set/unset dirty flag for all the update operations
                                          • #1378814: Files not being opened with o_direct flag during random read operation (Glusterfs 3.8.2)
                                          • #1378948: removal of file from nfs mount crashes ganesha server
                                          • #1379028: Modifications to AFR Events
                                          • #1379287: warning messages seen in glusterd logs for each 'gluster volume status' command
                                          • #1379528: Poor smallfile read performance on Arbiter volume compared to Replica 3 volume
                                          • #1379707: gfapi: Fix fd ref leaks
                                          • #1379996: Volume restart couldn't re-export the volume exported via ganesha.
                                          • #1380252: glusterd fails to start without installing glusterfs-events package
                                          • #1383591: glfs_realpath() should not return malloc()'d allocated memory
                                          • #1383692: GlusterFS fails to build on old Linux distros with linux/oom.h missing
                                          • #1383913: spurious heal info as pending heal entries never end on an EC volume while IOs are going on
                                          • #1385224: arbiter volume write performance is bad with sharding
                                           • #1385236: invalid argument warning messages seen in fuse client logs [2016-09-30 06:34:58.938667] W [dict.c:418:dict_set] (-->/usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x58722) 0-dict: !this || !value for key=link-count [Invalid argument]
                                           • #1385451: "nfs.disable: on" is not showing in Vol info by default for the 3.7.x volumes after updating to 3.9.0
                                          • #1386072: Spurious permission denied problems observed
                                          • #1386178: eventsapi/georep: Events are not available for Checkpoint and Status Change
                                          • #1386338: pmap_signin event fails to update brickinfo->signed_in flag
                                          • #1387099: Boolean attributes are published as string
                                          • #1387492: Error and warning message getting while removing glusterfs-events package
                                           • #1387502: Incorrect volume type in the "glusterd_state" file generated using CLI "gluster get-state"
                                          • #1387564: [Eventing]: UUID is showing zeros in the event message for the peer probe operation.
                                          • #1387894: Regression caused by enabling client-io-threads by default
                                          • #1387960: Sequential volume start&stop is failing with SSL enabled setup.
                                          • #1387964: [Eventing]: 'gluster vol bitrot scrub ondemand' does not produce an event
                                          • #1387975: Continuous warning messages getting when one of the cluster node is down on SSL setup.
                                          • #1387981: [Eventing]: 'gluster volume tier start force' does not generate a TIER_START event
                                          • #1387984: Add a test script for compound fops changes in AFR
                                          • #1387990: [RFE] Geo-replication Logging Improvements
                                          • #1388150: geo-replica slave node goes faulty for non-root user session due to fail to locate gluster binary
                                          • #1388323: fuse mount point not accessible
                                          • #1388350: Memory Leaks in snapshot code path
                                           • #1388470: throw warning to show that older tier commands are deprecated and will be removed.
                                          • #1388563: [Eventing]: 'VOLUME_REBALANCE' event messages have an incorrect volume name
                                          • #1388579: crypt: changes needed for openssl-1.1 (coming in Fedora 26)
                                          • #1388731: [GSS]glusterfind pre session hangs indefinitely in RHGS 3.1.3
                                          • #1388912: glusterfs can't self heal character dev file for invalid dev_t parameters
                                          • #1389675: Experimental translators and 4.0 features need to be disabled for release-3.9
                                          • #1389742: build: incorrect Requires: for portblock resource agent
                                          • #1390837: write-behind: flush stuck by former failed write
                                          • #1391448: md-cache: Invalidate cache entry in case of OPEN with O_TRUNC
                                          • #1392286: gfapi clients crash while using async calls due to double fd_unref
                                          • #1392718: Quota version not changing in the quota.conf after upgrading to 3.7.1 from 3.6.1
                                          • #1392844: Hosted Engine VM paused post replace-brick operation
                                          • #1392869: The FUSE client log is filling up with posix_acl_default and posix_acl_access messages
                                          • "},{"location":"release-notes/4.0.0/","title":"Release notes for Gluster 4.0.0","text":"

                                            The Gluster community celebrates 13 years of development with this latest release, Gluster 4.0. This release enables improved integration with containers, an enhanced user experience, and a next-generation management framework. The 4.0 release helps cloud-native app developers choose Gluster as the default scale-out distributed file system.

                                            A selection of the important features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

                                            • Announcements
                                            • Major changes and features
                                            • Major issues
                                            • Bugs addressed in the release
                                            "},{"location":"release-notes/4.0.0/#announcements","title":"Announcements","text":"
                                             1. As 3.13 was a short-term maintenance release, features that were included in that release are available with 4.0.0 as well. These features may be of interest to users upgrading to 4.0.0 from releases older than 3.13. The 3.13 release notes capture the list of features that were introduced with 3.13.

                                            NOTE: As 3.13 was a short term maintenance release, it will reach end of life (EOL) with the release of 4.0.0. (reference)

                                             2. Releases that receive maintenance updates after the 4.0 release are 3.10, 3.12, and 4.0 (reference)

                                             3. With this release, the CentOS storage SIG will not build server packages for CentOS6. Server packages will be available for CentOS7 only. For ease of migration, client packages on CentOS6 will be published and maintained.

                                            NOTE: This change was announced here

                                            "},{"location":"release-notes/4.0.0/#major-changes-and-features","title":"Major changes and features","text":"

                                            Features are categorized into the following sections,

                                            • Management
                                            • Monitoring
                                            • Performance
                                            • Geo-replication
                                            • Standalone
                                            • Developer related
                                            "},{"location":"release-notes/4.0.0/#management","title":"Management","text":"

                                             GlusterD2 (GD2) is the new management daemon for Gluster-4.0. It is a complete rewrite, with all-new internal core frameworks, that makes it more scalable, easier to integrate with, and lower in maintenance requirements.

                                            A quick start guide is available to get started with GD2.

                                            GD2 in Gluster-4.0 is a technical preview release. It is not recommended for production use. For the current release glusterd is the preferred management daemon. More information is available in the Limitations section.

                                             GD2 brings many new changes and improvements that affect both users and developers.

                                            "},{"location":"release-notes/4.0.0/#features","title":"Features","text":"

                                            The most significant new features brought by GD2 are below.

                                            "},{"location":"release-notes/4.0.0/#native-rest-apis","title":"Native REST APIs","text":"

                                            GD2 exposes all of its management functionality via ReST APIs. The ReST APIs accept and return data encoded in JSON. This enables external projects such as Heketi to be better integrated with GD2.
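
                                             As an illustration, the volume list could be fetched with any HTTP client; the endpoint path and port below are assumptions based on a typical GD2 deployment, so consult the GD2 quick start guide for the exact address and any authentication that is enabled:

                                             curl -s http://<gd2-host>:24007/v1/volumes\n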

                                            "},{"location":"release-notes/4.0.0/#cli","title":"CLI","text":"

                                             GD2 provides a new CLI, glustercli, built on top of the ReST API. The CLI retains much of the syntax of the old gluster command. In addition, it provides:

                                            • Improved CLI help messages
                                            • Auto completion for sub commands
                                            • Improved CLI error messages on failure
                                            • Framework to run glustercli from outside the Cluster.

                                            In this release, the following CLI commands are available,

                                            • Peer management
                                            • Peer Probe/Attach
                                            • Peer Detach
                                            • Peer Status
                                            • Volume Management
                                            • Create/Start/Stop/Delete
                                            • Expand
                                            • Options Set/Get
                                            • Bitrot
                                            • Enable/Disable
                                            • Configure
                                            • Status
                                            • Geo-replication
                                            • Create/Start/Pause/Resume/Stop/Delete
                                            • Configure
                                            • Status
                                            "},{"location":"release-notes/4.0.0/#configuration-store","title":"Configuration store","text":"

                                             GD2 uses etcd to store the Gluster pool configuration, which solves the configuration synchronization issues reported against the Gluster management daemon.

                                            GD2 embeds etcd, and automatically creates and manages an etcd cluster when forming the trusted storage pool. If required, GD2 can also connect to an already existing etcd cluster.

                                            "},{"location":"release-notes/4.0.0/#transaction-framework","title":"Transaction Framework","text":"

                                             GD2 brings a newer, more flexible distributed framework to help it perform actions across the storage pool. The transaction framework provides better control for choosing peers for a Gluster operation, and it also provides a mechanism to roll back changes when something goes wrong.

                                            "},{"location":"release-notes/4.0.0/#volume-options","title":"Volume Options","text":"

                                             GD2 intelligently fetches and builds the list of volume options by directly reading the xlators' *.so files. It performs the required validations during volume set without maintaining a duplicate list of options. This avoids many issues that can happen due to a mismatch in information between GlusterD and the xlator shared libraries.

                                            Volume options listing is also improved, to clearly distinguish configured options and default options. Work is still in progress to categorize these options and tune the list for better understanding and ease of use.

                                            "},{"location":"release-notes/4.0.0/#volfiles-generation-and-management","title":"Volfiles generation and management","text":"

                                             GD2 has a newer, better-structured way for developers to define the volfile structure. The new method reduces the effort required to extend graphs or add new graphs.

                                             Also, volfiles are generated on a single peer and stored in the etcd store. This is very important for scalability, since volfiles are no longer stored on every node.

                                            "},{"location":"release-notes/4.0.0/#security","title":"Security","text":"

                                             GD2 supports TLS for ReST and internal communication, and authentication for the ReST API. If enabled, the ReST APIs are currently limited to the CLI, or to users who have access to the token file present at $GLUSTERD2_WORKDIR/auth.

                                            "},{"location":"release-notes/4.0.0/#features-integration-self-heal","title":"Features integration - Self Heal","text":"

                                             The self-heal feature is integrated for new volumes created using GlusterD2.

                                            "},{"location":"release-notes/4.0.0/#geo-replication","title":"Geo-replication","text":"

                                             With GD2 integration, geo-replication setup becomes very easy. If the master and remote volumes are available and running, geo-replication can be set up with just a single command.

                                            glustercli geo-replication create <mastervol> <remotehost>::<remotevol>\n

                                             Geo-replication status is improved; the status output now clearly distinguishes the details of multiple sessions.

                                             The order of status rows was not predictable in earlier releases, which made it very difficult to correlate the geo-replication status with bricks. With this release, master worker status rows will always match the brick list in volume info.

                                            Status can be checked using,

                                            glustercli geo-replication status\nglustercli geo-replication status <mastervol> <remotehost>::<remotevol>\n

                                            All the other commands are available as usual.

                                            Limitations:

                                             • On remote nodes, geo-replication does not yet create the log directories. As a workaround, create the required log directories on the remote volume nodes.
                                            "},{"location":"release-notes/4.0.0/#events-apis","title":"Events APIs","text":"

                                             The Events API feature is integrated with GD2. Webhooks can be registered to listen for GlusterFS events. Work is in progress to expose a REST API to view all the events that happened in the last 15 minutes.

                                            "},{"location":"release-notes/4.0.0/#limitations","title":"Limitations","text":""},{"location":"release-notes/4.0.0/#backward-compatibility","title":"Backward compatibility","text":"

                                            GD2 is not backwards compatible with the older GlusterD. Heterogeneous clusters running both GD2 and GlusterD are not possible.

                                            GD2 retains compatibility with Gluster-3.x clients. Old clients will still be able to mount and use volumes exported using GD2.

                                            "},{"location":"release-notes/4.0.0/#upgrade-and-migration","title":"Upgrade and migration","text":"

                                             In Gluster-4.0, GD2 does not support upgrading from Gluster-3.x releases. Gluster-4.0 will ship with both GD2 and the existing GlusterD. Users will be able to upgrade to Gluster-4.0 while continuing to use GlusterD.

                                            In Gluster-4.1, users will be able to migrate from GlusterD to GD2. Further, upgrades from Gluster-4.1 running GD2 to higher Gluster versions would be supported from release 4.1 onwards.

                                             Post Gluster-4.1, GlusterD will be maintained for a couple of releases, after which the only option to manage the cluster will be GD2.

                                            "},{"location":"release-notes/4.0.0/#missing-and-partial-commands","title":"Missing and partial commands","text":"

                                             Not all commands from GlusterD have been implemented for GD2, and some have been only partially implemented. This means not all GlusterFS features are available in GD2. We aim to bring back most of the commands in Gluster-4.1.

                                            "},{"location":"release-notes/4.0.0/#recovery-from-full-shutdown","title":"Recovery from full shutdown","text":"

                                             With GD2, recovering from a full cluster shutdown requires reading the available documentation as well as some expertise.

                                            "},{"location":"release-notes/4.0.0/#known-issues","title":"Known Issues","text":""},{"location":"release-notes/4.0.0/#2-node-clusters","title":"2-node clusters","text":"

                                            GD2 does not work well in 2-node clusters. Two main issues exist in this regard.

                                            • Restarting GD2 fails in 2-node clusters #352
                                            • Detach fails in 2-node clusters #332

                                             So it is currently recommended to run GD2 only in clusters of 3 nodes or larger.

                                            "},{"location":"release-notes/4.0.0/#other-issues","title":"Other issues","text":"

                                             Other known issues are currently tracked on GitHub issues. Please file any other issue you find there.

                                            "},{"location":"release-notes/4.0.0/#monitoring","title":"Monitoring","text":"

                                             Until now, the absence of live monitoring support in GlusterFS constrained the experience of both users and developers. Statedump is useful for debugging, but is too heavyweight for live monitoring.

                                             Further, the existence of the debug/io-stats translator was not widely known, and gluster volume profile was not recommended as it impacted performance.

                                            In this release, GlusterFS enables a lightweight method to access internal information and avoids the performance penalty and complexities of previous approaches.

                                            "},{"location":"release-notes/4.0.0/#1-metrics-collection-across-every-fop-in-every-xlator","title":"1. Metrics collection across every FOP in every xlator","text":"

                                             Notes for users: Gluster now has built-in latency measures in the xlator abstraction, enabling the capture of metrics and usage patterns across workloads.

                                            These measures are currently enabled by default.

                                            Limitations: This feature is auto-enabled and cannot be disabled.

                                            "},{"location":"release-notes/4.0.0/#2-monitoring-support","title":"2. Monitoring support","text":"

                                             Notes for users: Currently, the only project that consumes these metrics and provides basic monitoring is glustermetrics, which gives a good idea of how to utilize the metrics dumped from the processes.

                                             Users can send the SIGUSR2 signal to a Gluster process to dump its metrics into the /var/run/gluster/metrics/ directory.
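
                                             A minimal sketch of dumping metrics from a brick process is shown below; the process name glusterfsd and the pgrep filter are illustrative, since any Gluster process can be signalled:

                                             # kill -USR2 $(pgrep -x glusterfsd | head -n 1)   # dump metrics for one brick process\n# ls /var/run/gluster/metrics/                    # the dumped metrics files appear here\n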

                                             Limitations: Currently, the core Gluster stack and memory management systems provide metrics. A framework to generate more metrics is present for other translators and core components; however, additional metrics are not added in this release.

                                            "},{"location":"release-notes/4.0.0/#performance","title":"Performance","text":""},{"location":"release-notes/4.0.0/#1-ec-make-metadata-fgetxattr-operations-faster","title":"1. EC: Make metadata [F]GETXATTR operations faster","text":"

                                             Notes for users: The disperse translator has made performance improvements to the [F]GETXATTR operation. Workloads involving heavy use of extended attributes on files and directories will gain from the improvements made.

                                            "},{"location":"release-notes/4.0.0/#2-allow-md-cache-to-serve-nameless-lookup-from-cache","title":"2. Allow md-cache to serve nameless lookup from cache","text":"

                                             Notes for users: The md-cache translator is enhanced to cache nameless lookups (typically seen with NFS workloads). This helps speed up overall operations on the volume by reducing the number of lookups done over the network. Typical workloads that will benefit from this enhancement are:

                                            • NFS based access
                                            • Directory listing with FUSE, when ACLs are enabled
                                            "},{"location":"release-notes/4.0.0/#3-md-cache-allow-runtime-addition-of-xattrs-to-the-list-of-xattrs-that-md-cache-caches","title":"3. md-cache: Allow runtime addition of xattrs to the list of xattrs that md-cache caches","text":"

                                             Notes for users: md-cache previously cached Gluster-specific extended attributes of a file or directory. It has now been enhanced to cache user-provided attributes (xattrs) as well.

                                            To add specific xattrs to the cache list, use the following command:

                                             # gluster volume set <volname> xattr-cache-list "<xattr-name>,<xattr-name>,..."\n

                                             Existing options, such as "cache-samba-metadata" and "cache-swift-metadata", continue to function. The new option "xattr-cache-list" appends to the list generated by the existing options.

                                             Limitations: Setting this option overwrites its previously set value. Appending to the existing list of xattrs is not supported in this release.

                                            "},{"location":"release-notes/4.0.0/#4-cache-last-stripe-of-an-ec-volume-while-write-is-going-on","title":"4. Cache last stripe of an EC volume while write is going on","text":"

                                             Notes for users: The disperse translator now has the option to retain a write-through cache of the last write stripe. This improves small sequential append IO patterns by reducing the need to read a partial stripe for append operations.

                                             To enable this, use:

                                            # gluster volume set <volname> disperse.stripe-cache <N>\n

                                             Where <N> is the number of stripes to cache."},{"location":"release-notes/4.0.0/#5-tie-breaker-logic-for-blocking-inodelksentrylk-in-shd","title":"5. tie-breaker logic for blocking inodelks/entrylk in SHD","text":"

                                             Notes for users: Self-heal daemon locking has been enhanced to identify situations where a self-heal daemon is actively working on an inode. This enables other self-heal daemons to proceed with other entries in the queue, rather than waiting on a particular entry, thus preventing starvation among self-heal threads.

                                            "},{"location":"release-notes/4.0.0/#6-independent-eager-lock-options-for-file-and-directory-accesses","title":"6. Independent eager-lock options for file and directory accesses","text":"

                                            Notes for users: A new option named 'disperse.other-eager-lock' has been added to make it possible to have different settings for regular file accesses and accesses to other types of files (like directories).

                                             By default this option is enabled to ensure the same behavior as in previous versions. If you have multiple clients creating, renaming or removing files from the same directory, you can disable this option to improve performance for these users while still keeping the best performance for file accesses.
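
                                             For example, a sketch of disabling it on a volume, using the option name introduced in this note:

                                             # gluster volume set <volname> disperse.other-eager-lock off\n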

                                            "},{"location":"release-notes/4.0.0/#7-md-cache-added-an-option-to-cache-statfs-data","title":"7. md-cache: Added an option to cache statfs data","text":"

                                             Notes for users: This can be controlled with the option performance.md-cache-statfs:

                                            gluster volume set <volname> performance.md-cache-statfs <on|off>\n
                                            "},{"location":"release-notes/4.0.0/#8-improved-disperse-performance-due-to-parallel-xattrop-updates","title":"8. Improved disperse performance due to parallel xattrop updates","text":"

                                             Notes for users: The disperse translator has been optimized to perform the xattrop update operation in parallel on the bricks during self-heal, improving performance.

                                            "},{"location":"release-notes/4.0.0/#geo-replication_1","title":"Geo-replication","text":""},{"location":"release-notes/4.0.0/#1-geo-replication-improve-gverifysh-logs","title":"1. Geo-replication: Improve gverify.sh logs","text":"

                                             Notes for users: gverify.sh is the script that runs during geo-rep session creation to validate the prerequisites. The logs have been improved and their locations have changed as follows:

                                             1. The slave mount log file is changed from <logdir>/geo-replication-slaves/slave.log to <logdir>/geo-replication/gverify-slavemnt.log
                                             2. The master mount log file is separated from the slave log file and placed under <logdir>/geo-replication/gverify-mastermnt.log
                                            "},{"location":"release-notes/4.0.0/#2-geo-rep-cleanup-stale-unusable-xsync-changelogs","title":"2. Geo-rep: Cleanup stale (unusable) XSYNC changelogs.","text":"

                                             Notes for users: Stale xsync changelogs were not cleaned up, causing them to accumulate on the system. This change cleans up the stale xsync changelogs if geo-replication has to restart from a faulty state.

                                            "},{"location":"release-notes/4.0.0/#standalone","title":"Standalone","text":""},{"location":"release-notes/4.0.0/#1-ability-to-force-permissions-while-creating-filesdirectories-on-a-volume","title":"1. Ability to force permissions while creating files/directories on a volume","text":"

                                             Notes for users: Options have been added to the posix translator to override the default umask values with which files and directories are created. This is particularly useful when applications share content based on GID. As the default mode bits prevent such sharing, and supersede ACLs in this regard, these options are provided to control this behavior.

                                            Command usage is as follows:

                                            # gluster volume set <volume name> storage.<option-name> <value>\n

                                            The valid <value> ranges from 0000 to 0777

                                             The valid values of <option-name> are:

                                            • create-mask
                                            • create-directory-mask
                                            • force-create-mode
                                            • force-create-directory

                                            Options \"create-mask\" and \"create-directory-mask\" are added to remove the mode bits set on a file or directory when its created. Default value of these options is 0777. Options \"force-create-mode\" and \"force-create-directory\" sets the default permission for a file or directory irrespective of the clients umask. Default value of these options is 0000.

                                            "},{"location":"release-notes/4.0.0/#2-replace-md5-usage-to-enable-fips-support","title":"2. Replace MD5 usage to enable FIPS support","text":"

                                             Notes for users: Previously, if Gluster was run on a FIPS-enabled system, it used to crash because MD5 is not FIPS compliant and Gluster consumes MD5 checksums in various places, such as self-heal and geo-replication. By replacing MD5 with FIPS-compliant SHA256, Gluster no longer crashes on a FIPS-enabled system.

                                            However, in order for AFR self-heal to work correctly during rolling upgrade to 4.0, we have tied this to a volume option called fips-mode-rchecksum.

                                             gluster volume set <VOLNAME> fips-mode-rchecksum on has to be performed post upgrade to change the default from MD5 to SHA256. After this, Gluster processes will run cleanly on a FIPS-enabled system.
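
                                             For example, once every node has been upgraded to 4.0:

                                             # gluster volume set <VOLNAME> fips-mode-rchecksum on\n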

                                            NOTE: Once glusterfs 3.x is EOL'ed, the usage of the option to control this change will be removed.

                                             Limitations: The snapshot feature in Gluster still uses MD5 checksums; hence, running on FIPS-compliant systems requires that the snapshot feature is not used.

                                            "},{"location":"release-notes/4.0.0/#3-dentry-fop-serializer-xlator-on-brick-stack","title":"3. Dentry fop serializer xlator on brick stack","text":"

                                             Notes for users: This feature strengthens the consistency of the file system, trading some performance for it, and is strongly suggested for workloads where consistency is required.

                                             In previous releases, the metadata about files and directories shared across clients was not always consistent when the use cases/workloads involved a large number of renames, or frequent creations and deletions. It does eventually become consistent, but a large proportion of applications are not built to handle eventual consistency.

                                            This feature can be enabled as follows,

                                            # gluster volume set <volname> features.sdfs enable\n

                                            Limitations: This feature is released as a technical preview, as performance implications are not known completely.

                                            "},{"location":"release-notes/4.0.0/#4-add-option-to-disable-nftw-based-deletes-when-purging-the-landfill-directory","title":"4. Add option to disable nftw() based deletes when purging the landfill directory","text":"

                                            Notes for users: The gluster brick processes use an optimized manner of deleting entire sub-trees using the nftw call. With this release, an option is being added to toggle this behavior in cases where this optimization is not desired.

                                            This is not an exposed option, and needs to be controlled using the volume graph. Adding the disable-landfill-purge option to the storage/posix translator helps toggle this feature.

                                            The default is always enabled, as in the older releases.
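
                                             A hypothetical excerpt of a brick volfile showing where such an option would be placed (the volume name, brick directory, and the boolean value are illustrative assumptions; a real brick volfile contains many more options):

                                             volume <volname>-posix\n    type storage/posix\n    option directory /bricks/<brick-path>\n    option disable-landfill-purge on\nend-volume\n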

                                            "},{"location":"release-notes/4.0.0/#5-add-option-in-posix-to-limit-hardlinks-per-inode","title":"5. Add option in POSIX to limit hardlinks per inode","text":"

                                            Notes for users: Added an option to POSIX that limits the number of hard links that can be created against an inode (file). This helps when there needs to be a different hardlink limit than what the local FS provides for the bricks.

                                            The option to control this behavior is,

                                            # gluster volume set <volname> storage.max-hardlinks <N>\n

                                             Where <N> is 0-0xFFFFFFFF. If the local file system that the brick is using has a lower limit than this setting, that lower limit would be honored.

                                             The default is set to 100. Setting this to 0 turns the limit off and leaves it to the local file system defaults, while setting it to 1 turns off the ability to create hard links.
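
                                             For example, to defer entirely to the local file system's own hard-link limit:

                                             # gluster volume set <volname> storage.max-hardlinks 0\n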

                                            "},{"location":"release-notes/4.0.0/#6-enhancements-for-directory-listing-in-readdirp","title":"6. Enhancements for directory listing in readdirp","text":"

                                            Notes for users: Prior to this release, rebalance performed a fix-layout on a directory before healing its subdirectories. If there were a lot of subdirs, it could take a while before all subdirs were created on the newly added bricks. This led to some missed directory listings.

                                            This is changed with this release to process children directories before the parents, thereby changing the way rebalance acts (files within sub directories are migrated first) and also resolving the directory listing issue.

                                            "},{"location":"release-notes/4.0.0/#7-rebalance-skips-migration-of-file-if-it-detects-writes-from-application","title":"7. Rebalance skips migration of file if it detects writes from application","text":"

                                             Notes for users: The rebalance process skips migration of a file if it detects writes from the application. To force migration even in the presence of writes from the application to the file, "cluster.force-migration" has to be turned on; it is off by default.

                                            The option to control this behavior is,

                                            # gluster volume set <volname> cluster.force-migration <on/off>\n

                                             Limitations: It is suggested to run remove-brick with cluster.force-migration turned off. This results in files that have writes from clients being skipped during rebalance; it is suggested to copy these files manually to a Gluster mount after the remove-brick commit is performed.

                                            Rebalancing files with active write IO to them has a chance of data corruption.

                                            "},{"location":"release-notes/4.0.0/#developer-related","title":"Developer related","text":""},{"location":"release-notes/4.0.0/#1-xlators-should-not-provide-init-fini-and-others-directly-but-have-class_methods","title":"1. xlators should not provide init(), fini() and others directly, but have class_methods","text":"

                                             Notes for developers: This release brings in a new, unified manner of defining xlator methods, which avoids certain unwanted side effects of the older method (like always having to define certain symbols) and provides a cleaner single-point registration mechanism for all xlator methods.

                                             The new method needs just a single symbol, named xlator_api, to be exposed in the translator code.

                                             The elements of this structure are defined here, and an example usage of the same can be seen here.

                                            The older mechanism is still supported, but not preferred.

                                            "},{"location":"release-notes/4.0.0/#2-framework-for-distributed-testing","title":"2. Framework for distributed testing","text":"

                                            Notes for developers: A new framework for running the regression tests for Gluster is added. The README has details on how to use the same.

                                            "},{"location":"release-notes/4.0.0/#3-new-api-for-acquiring-mandatory-locks","title":"3. New API for acquiring mandatory locks","text":"

                                             Notes for developers: The current API for byte-range locks, glfs_posix_lock, doesn't allow applications to specify whether the lock is of the advisory or mandatory type. This change introduces an extended byte-range lock API with an additional argument that sets the byte-range lock mode to one of advisory (the default) or mandatory.

                                            Refer to the header for details on how to use this API.

                                            A sample test program can be found here that also helps in understanding the usage of this API.

                                            "},{"location":"release-notes/4.0.0/#4-new-on-wire-protocol-xdr-needed-to-support-iattx-and-cleaner-dictionary-structure","title":"4. New on-wire protocol (XDR) needed to support iattx and cleaner dictionary structure","text":"

                                            Notes for developers: With changes in the code to adapt to a newer iatt structure, and stricter data format enforcement within dictionaries passed across the wire, and also as a part of reducing technical debt around the RPC layer, this release introduces a new RPC Gluster protocol version (4.0.0).

                                            Typically this does not impact any development, other than to ensure that newer RPCs that are added would need to be on the 4.0.0 version of the protocol and dictionaries on the wire need to be better encoded.

                                            The newer iatt structure can be viewed here.

                                            An example of better encoding dictionary values for wire transfers can be seen here.

                                            Here is some additional information on Gluster RPC programs for the inquisitive.

                                            "},{"location":"release-notes/4.0.0/#5-the-protocol-xlators-should-prevent-sending-binary-values-in-a-dict-over-the-networks","title":"5. The protocol xlators should prevent sending binary values in a dict over the networks","text":"

                                             Notes for developers: Dict data over the wire in Gluster was previously sent in binary. This has been changed with this release, as the on-wire protocol is also new, to send XDR-encoded dict values across. In the future, any new dict type needs to handle the required XDR encoding as well.

                                            "},{"location":"release-notes/4.0.0/#6-translator-to-handle-global-options","title":"6. Translator to handle 'global' options","text":"

                                             Notes for developers: A GlusterFS process has around 50 command-line arguments. While many of the options are initial settings, many others can change their values during the volume's lifetime. Prior to this release, for many of these options, there was no way to change a setting other than restarting the process.

                                            With the introduction of global option translator, it is now possible to handle these options without restarts.

                                            If contributing code that adds to the process options, strongly consider adding the same to the global option translator. An example is provided here.

                                            "},{"location":"release-notes/4.0.0/#major-issues","title":"Major issues","text":"

                                            None

                                            "},{"location":"release-notes/4.0.0/#bugs-addressed","title":"Bugs addressed","text":"

                                            Bugs addressed since release-3.13.0 are listed below.

                                            • #827334: gfid is not there in the fsetattr and rchecksum requests being sent from protocol client
                                            • #1336889: Gluster's XDR does not conform to RFC spec
                                            • #1369028: rpc: Change the way client uuid is built
                                            • #1370116: Tests : Adding a test to check for inode leak
                                            • #1428060: write-behind: Allow trickling-writes to be configurable, fix usage of page_size and window_size
                                            • #1430305: Fix memory leak in rebalance
                                            • #1431955: [Disperse] Implement open fd heal for disperse volume
                                            • #1440659: Add events to notify disk getting fill
                                            • #1443145: Free runtime allocated resources upon graph switch or glfs_fini()
                                            • #1446381: detach start does not kill the tierd
                                            • #1467250: Accessing a file when source brick is down results in that FOP being hung
                                            • #1467614: Gluster read/write performance improvements on NVMe backend
                                            • #1469487: sys_xxx() functions should guard against bad return values from fs
                                            • #1471031: dht_(f)xattrop does not implement migration checks
                                            • #1471753: [disperse] Keep stripe in in-memory cache for the non aligned write
                                             • #1474768: The output of the "gluster help" command is difficult to read
                                             • #1479528: Rebalance estimate (ETA) shows wrong details (as initial message of 10min wait reappears) when still in progress
                                            • #1480491: tests: Enable geo-rep test cases
                                            • #1482064: Bringing down data bricks in cyclic order results in arbiter brick becoming the source for heal.
                                            • #1488103: Rebalance fails on NetBSD because fallocate is not implemented
                                            • #1492625: Directory listings on fuse mount are very slow due to small number of getdents() entries
                                            • #1496335: Extreme Load from self-heal
                                            • #1498966: Test case ./tests/bugs/bug-1371806_1.t is failing
                                            • #1499566: [Geo-rep]: Directory renames are not synced in hybrid crawl
                                            • #1501054: Structured logging support for Gluster logs
                                            • #1501132: posix health check should validate time taken between write timestamp and read timestamp cycle
                                            • #1502610: disperse eager-lock degrades performance for file create workloads
                                            • #1503227: [RFE] Changelog option in a gluster volume disables with no warning if geo-rep is configured
                                            • #1505660: [QUOTA] man page of gluster should be updated to list quota commands
                                            • #1506104: gluster volume splitbrain info needs to display output of each brick in a stream fashion instead of buffering and dumping at the end
                                            • #1506140: Add quorum checks in post-op
                                            • #1506197: [Parallel-Readdir]Warning messages in client log saying 'parallel-readdir' is not recognized.
                                            • #1508898: Add new configuration option to manage deletion of Worm files
                                            • #1508947: glusterfs: Include path in pkgconfig file is wrong
                                            • #1509189: timer: Possible race condition between gftimer* routines
                                            • #1509254: snapshot remove does not cleans lvm for deactivated snaps
                                            • #1509340: glusterd does not write pidfile correctly when forking
                                            • #1509412: Change default versions of certain features to 3.13 from 4.0
                                            • #1509644: rpc: make actor search parallel
                                            • #1509647: rpc: optimize fop program lookup
                                            • #1509845: In distribute volume after glusterd restart, brick goes offline
                                            • #1510324: Master branch is broken because of the conflicts
                                            • #1510397: Compiler atomic built-ins are not correctly detected
                                            • #1510401: fstat returns ENOENT/ESTALE
                                            • #1510415: spurious failure of tests/bugs/glusterd/bug-1345727-bricks-stop-on-no-quorum-validation.t
                                            • #1510874: print-backtrace.sh failing with cpio version 2.11 or older
                                            • #1510940: The number of bytes of the quota specified in version 3.7 or later is incorrect
                                            • #1511310: Test bug-1483058-replace-brick-quorum-validation.t fails inconsistently
                                            • #1511339: In Replica volume 2*2 when quorum is set, after glusterd restart nfs server is coming up instead of self-heal daemon
                                            • #1512437: parallel-readdir = TRUE prevents directories listing
                                            • #1512451: Not able to create snapshot
                                            • #1512455: glustereventsd hardcodes working-directory
                                            • #1512483: Not all files synced using geo-replication
                                            • #1513692: io-stats appends now instead of overwriting which floods filesystem with logs
                                            • #1513928: call stack group list leaks
                                            • #1514329: bug-1247563.t is failing on master
                                            • #1515161: Memory leak in locks xlator
                                            • #1515163: centos regression fails for tests/bugs/replicate/bug-1292379.t
                                            • #1515266: Prevent ec from continue processing heal operations after PARENT_DOWN
                                            • #1516206: EC DISCARD doesn't punch hole properly
                                            • #1517068: Unable to change the Slave configurations
                                            • #1517554: help for volume profile is not in man page
                                            • #1517633: Geo-rep: access-mount config is not working
                                            • #1517904: tests/bugs/core/multiplex-limit-issue-151.t fails sometimes in upstream master
                                            • #1517961: Failure of some regression tests on Centos7 (passes on centos6)
                                            • #1518508: Change GD_OP_VERSION to 3_13_0 from 3_12_0 for RFE https://bugzilla.redhat.com/show_bug.cgi?id=1464350
                                            • #1518582: Reduce lock contention on fdtable lookup
                                            • #1519598: Reduce lock contention on protocol client manipulating fd
                                            • #1520245: High mem/cpu usage, brick processes not starting and ssl encryption issues while testing scaling with multiplexing (500-800 vols)
                                            • #1520758: [Disperse] Add stripe in cache even if file/data does not exist
                                            • #1520974: Compiler warning in dht-common.c because of a switch statement on a boolean
                                            • #1521013: rfc.sh should allow custom remote names for ORIGIN
                                            • #1521014: quota_unlink_cbk crashes when loc.inode is null
                                            • #1521116: Absorb all test fixes from 3.8-fb branch into master
                                            • #1521213: crash when gifs_set_logging is called concurrently
                                            • #1522651: rdma transport may access an obsolete item in gf_rdma_device_t->all_mr, and causes glusterfsd/glusterfs process crash.
                                            • #1522662: Store allocated objects in the mem_acct
                                            • #1522775: glusterd consuming high memory
                                            • #1522847: gNFS Bug Fixes
                                            • #1522950: io-threads is unnecessarily calling accurate time calls on every FOP
                                            • #1522968: glusterd bug fixes
                                            • #1523295: md-cache should have an option to cache STATFS calls
                                            • #1523353: io-stats bugs and features
                                            • #1524252: quick-read: Discard cache for fallocate, zerofill and discard ops
                                            • #1524365: feature/bitrot: remove internal xattrs from lookup cbk
                                            • #1524816: heketi was not removing the LVs associated with Bricks removed when Gluster Volumes were deleted
                                            • #1526402: glusterd crashes when 'gluster volume set help' is executed
                                            • #1526780: ./run-tests-in-vagrant.sh fails because of disabled Gluster/NFS
                                            • #1528558: /usr/sbin/glusterfs crashing on Red Hat OpenShift Container Platform node
                                            • #1528975: Fedora 28 (Rawhide) renamed the pyxattr package to python2-pyxattr
                                            • #1529440: Files are not rebalanced if destination brick(available size) is of smaller size than source brick(available size)
                                            • #1529463: JWT support without external dependency
                                            • #1529480: Improve geo-replication logging
                                            • #1529488: entries not getting cleared post healing of softlinks (stale entries showing up in heal info)
                                            • #1529515: AFR: 3-way-replication: gluster volume set cluster.quorum-count should validate max no. of brick count to accept
                                            • #1529883: glusterfind is extremely slow if there are lots of changes
                                            • #1530281: glustershd fails to start on a volume force start after a brick is down
                                            • #1530910: Use after free in cli_cmd_volume_create_cbk
                                            • #1531149: memory leak: get-state leaking memory in small amounts
                                            • #1531987: increment of a boolean expression warning
                                            • #1532238: Failed to access volume via Samba with undefined symbol from socket.so
                                            • #1532591: Tests: Geo-rep tests are failing in few regression machines
                                            • #1533594: EC test fails when brick mux is enabled
                                            • #1533736: posix_statfs returns incorrect f_bfree values if brick is full.
                                            • #1533804: readdir-ahead: change of cache-size should be atomic
                                            • #1533815: Mark ./tests/basic/ec/heal-info.t as bad
                                            • #1534602: FUSE reverse notificatons are not written to fuse dump
                                            • #1535438: Take full lock on files in 3 way replication
                                            • #1535772: Random GlusterFSD process dies during rebalance
                                            • #1536913: tests/bugs/cli/bug-822830.t fails on Centos 7 and locally
                                            • #1538723: build: glibc has removed legacy rpc headers and rpcgen in Fedora28, use libtirpc
                                            • #1539657: Georeplication tests intermittently fail
                                            • #1539701: gsyncd is running gluster command to get config file path is not required
                                            • #1539842: GlusterFS 4.0.0 tracker
                                            • #1540438: Remove lock recovery logic from client and server protocol translators
                                            • #1540554: Optimize glusterd_import_friend_volume code path
                                            • #1540882: Do lock conflict check correctly for wait-list
                                            • #1541117: sdfs: crashes if the features is enabled
                                            • #1541277: dht_layout_t leak in dht_populate_inode_for_dentry
                                            • #1541880: Volume wrong size
                                            • #1541928: A down brick is incorrectly considered to be online and makes the volume to be started without any brick available
                                            • #1542380: Changes to self-heal logic w.r.t. detecting of split-brains
                                            • #1542382: Add quorum checks in post-op
                                            • #1542829: Too many log messages about dictionary and options
                                            • #1543487: dht_lookup_unlink_of_false_linkto_cbk fails with \"Permission denied\"
                                            • #1543706: glusterd fails to attach brick during restart of the node
                                            • #1543711: glustershd/glusterd is not using right port when connecting to glusterfsd process
                                            • #1544366: Rolling upgrade to 4.0 is broken
                                            • #1544638: 3.8 -> 3.10 rolling upgrade fails (same for 3.12 or 3.13) on Ubuntu 14
                                            • #1545724: libgfrpc does not export IPv6 RPC methods even with --with-ipv6-default
                                            • #1547635: add option to bulld rpm without server
                                            • #1547842: Typo error in __dht_check_free_space function log message
                                            • #1548264: [Rebalance] \"Migrate file failed: : failed to get xattr [No data available]\" warnings in rebalance logs
                                            • #1548271: DHT calls dht_lookup_everywhere for 1xn volumes
                                            • #1550808: memory leak in pre-op in replicate volumes for every write
                                            • #1551112: Rolling upgrade to 4.0 is broken
                                            • #1551640: GD2 fails to dlopen server xlator
                                            • #1554077: 4.0 clients may fail to convert iatt in dict when recieving the same from older (< 4.0) servers
                                            • "},{"location":"release-notes/4.0.1/","title":"Release notes for Gluster 4.0.1","text":"

                                               This is a bugfix release. The release notes for 4.0.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.0 release.

                                              "},{"location":"release-notes/4.0.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                              No Major changes

                                              "},{"location":"release-notes/4.0.1/#major-issues","title":"Major issues","text":"

                                              No Major issues

                                              "},{"location":"release-notes/4.0.1/#bugs-addressed","title":"Bugs addressed","text":"

                                              Bugs addressed since release-4.0.0 are listed below.

                                              • #1550946: [brick-mux] performance bottleneck introduced while solving ping timer expiry
                                              • #1552404: [CIOT] : Gluster CLI says \"io-threads : enabled\" on existing volumes post upgrade.
                                              • #1554235: Memory corruption is causing crashes, hangs and invalid answers
                                              • #1555198: After a replace brick command, self-heal takes some time to start healing files on disperse volumes
                                              • #1555309: core: libtirpc, backport XDR macro refactor
                                              • #1557906: [EC] Read performance of EC volume exported over gNFS is significantly lower than write performance
                                              "},{"location":"release-notes/4.0.2/","title":"Release notes for Gluster 4.0.2","text":"

                                               This is a bugfix release. The release notes for 4.0.0 and 4.0.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.0 release.

                                              "},{"location":"release-notes/4.0.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                               This release contains a fix for a security vulnerability in Gluster as follows:

                                              • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1088
                                              • https://nvd.nist.gov/vuln/detail/CVE-2018-1088

                                               Installing the updated packages and restarting gluster services will update the Gluster shared storage volume volfiles, which are more secure than the defaults currently in place.

                                               Further, for increased security, the Gluster shared storage volume can be TLS enabled, and access to it can be restricted using the auth.ssl-allow option. See this guide for more details.
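
                                               As a minimal, hedged sketch (the host names below are placeholders, the shared storage volume is assumed to carry its standard name gluster_shared_storage, and TLS certificates are assumed to be already provisioned as described in the SSL/TLS guide), enabling TLS on the shared storage volume and restricting access could look like the following shell snippet:

                                               # Enable TLS on the I/O path of the shared storage volume (sketch; adapt to your deployment)
                                               gluster volume set gluster_shared_storage client.ssl on
                                               gluster volume set gluster_shared_storage server.ssl on
                                               # Restrict access to clients whose certificate common names appear in this list
                                               gluster volume set gluster_shared_storage auth.ssl-allow 'node1.example.com,node2.example.com'

                                               Every node that legitimately mounts the shared storage volume must be included in the auth.ssl-allow list; a node left out of the list will be denied access.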

                                              "},{"location":"release-notes/4.0.2/#major-issues","title":"Major issues","text":"

                                              No Major issues

                                              "},{"location":"release-notes/4.0.2/#bugs-addressed","title":"Bugs addressed","text":"

                                              Bugs addressed since release-4.0.1 are listed below.

                                              • #1558959: [brick-mux] incorrect event-thread scaling in server_reconfigure()
                                              • #1559079: test ./tests/bugs/ec/bug-1236065.t is generating crash on build
                                              • #1559244: enable ownthread feature for glusterfs4_0_fop_prog
                                              • #1561721: Rebalance failures on a dispersed volume with lookup-optimize enabled
                                              • #1562728: SHD is not healing entries in halo replication
                                              • #1564461: gfapi: fix a couple of minor issues
                                              • #1565654: /var/log/glusterfs/bricks/export_vdb.log flooded with this error message \"Not able to add to index [Too many links]\"
                                              • #1566822: [Remove-brick] Many files were not migrated from the decommissioned bricks; commit results in data loss
                                              • #1569403: EIO errors on some operations when volume has mixed brick versions on a disperse volume
                                              • #1570432: CVE-2018-1088 glusterfs: Privilege escalation via gluster_shared_storage when snapshot scheduling is enabled [fedora-all]
                                              "},{"location":"release-notes/4.1.0/","title":"Release notes for Gluster 4.1.0","text":"

                                               This is a major release that includes a range of features enhancing management, performance, and monitoring, and that provides newer functionality such as thin arbiters, cloud archival, and time consistency. It also contains several bug fixes.

                                              A selection of the important features and changes are documented on this page. A full list of bugs that have been addressed is included further below.

                                              • Announcements
                                              • Major changes and features
                                              • Major issues
                                              • Bugs addressed in the release
                                              "},{"location":"release-notes/4.1.0/#announcements","title":"Announcements","text":"
                                               1. As 4.0 was a short term maintenance release, features which have been included in that release are available with 4.1.0 as well. These features may be of interest to users upgrading to 4.1.0 from releases older than 4.0. The 4.0 release notes capture the list of features that were introduced with 4.0.

                                              NOTE: As 4.0 was a short term maintenance release, it will reach end of life (EOL) with the release of 4.1.0. (reference)

                                               2. Releases that receive maintenance updates post the 4.1 release are 3.12 and 4.1 (reference)

                                               NOTE: The 3.10 long term maintenance release will reach end of life (EOL) with the release of 4.1.0. (reference)

                                               3. Continuing with this release, the CentOS storage SIG will not build server packages for CentOS6. Server packages will be available for CentOS7 only. For ease of migration, client packages on CentOS6 will be published and maintained.

                                              NOTE: This change was announced here

                                              "},{"location":"release-notes/4.1.0/#major-changes-and-features","title":"Major changes and features","text":"

                                              Features are categorized into the following sections,

                                              • Management
                                              • Monitoring
                                              • Performance
                                              • Standalone
                                              • Developer related
                                              "},{"location":"release-notes/4.1.0/#management","title":"Management","text":""},{"location":"release-notes/4.1.0/#glusterd2","title":"GlusterD2","text":"

                                              IMP: GlusterD2 in Gluster-4.1.0 is still considered a preview and is experimental. It should not be considered for production use. Users should still expect breaking changes to be possible, though efforts will be taken to avoid such changes. As GD2 is still under heavy development, new features can be expected throughout the 4.1 release.

                                              GD2 brings initial support for rebalance, snapshots, intelligent volume provisioning and a lot of other bug fixes and internal changes.

                                              "},{"location":"release-notes/4.1.0/#rebalance-786","title":"Rebalance #786","text":"

                                              GD2 supports running rebalance on volumes. Supported rebalance operations include,

                                              • rebalance start
                                              • rebalance start with fix-layout
                                              • rebalance stop
                                              • rebalance status

                                              Support only exists in the ReST API right now. CLI support will be introduced in subsequent releases.

                                              "},{"location":"release-notes/4.1.0/#snapshot-533","title":"Snapshot #533","text":"

                                              Initial support for volume snapshot has been introduced. At the moment, snapshots are supported only on Thin-LVM bricks.

                                               Supported snapshot operations include,

                                              • create
                                              • activate/deactivate
                                              • list
                                              • info
                                              "},{"location":"release-notes/4.1.0/#intelligent-volume-provisioning-ivp-661","title":"Intelligent volume provisioning (IVP) #661","text":"

                                               GD2 brings a very early preview of intelligent volume creation, similar to Heketi.

                                               IMP: This is considered experimental, and the API and implementation are not final. It is very possible that both the API and the implementation will change.

                                               IVP enables users to create volumes by just providing the expected volume type and a size, without providing the brick layout. IVP is supported in the CLI via the normal volume create command.

                                              More information on IVP can be found in the pull-request.

                                               To support IVP, the ability to add and manage block devices, along with basic support for zones, is available. #783 #785

                                              "},{"location":"release-notes/4.1.0/#other-changes","title":"Other changes","text":"

                                              Other notable changes include,

                                              • Support for volume option levels (experimental, advanced, deprecated) #591
                                              • Support for resetting volume options #545
                                              • Option hooks for volume set #708
                                              • Support for setting quota options #583
                                              • Changes to transaction locking #808
                                              • Support for setting metadata on peers and volume #600 #689 #704
                                              • Thin arbiter support #673 #702

                                              In addition to the above, a lot of smaller bug-fixes and enhancements to internal frameworks and tests have also been done.

                                              "},{"location":"release-notes/4.1.0/#known-issues","title":"Known issues","text":"

                                              GD2 is still under heavy development and has lots of known bugs. For filing new bugs or tracking known bugs, please use the GD2 github issue tracker.

                                              "},{"location":"release-notes/4.1.0/#2-changes-to-gluster-based-smbconf-share-management","title":"2. Changes to gluster based smb.conf share management","text":"

                                               Previously, Gluster deleted the entire volume share section from smb.conf either after a volume was stopped or when the user.cifs/user.smb volume set options were disabled. With this release, the volume share sections that were added by Samba hook scripts inside smb.conf are no longer removed after a volume stop or on disabling the user.cifs/user.smb volume set options. Instead, the following share-specific smb.conf parameter is added to the end of the corresponding volume share section to make it unavailable for client access:

                                               available = no

                                               This makes sure that any additional smb.conf parameters configured externally are retained. For more details on the above parameter, search for "available (S)" in the smb.conf(5) manual page.
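
                                               For illustration only (the share name, volume name, and exact parameters written by the hook scripts are hypothetical here and may differ on a real system), a volume share section that has been disabled this way would look roughly as follows:

                                               [gluster-myvol]
                                               path = /
                                               vfs objects = glusterfs
                                               glusterfs:volume = myvol
                                               read only = no
                                               ; appended by the hook script on volume stop or when user.smb/user.cifs is disabled
                                               available = no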

                                              "},{"location":"release-notes/4.1.0/#monitoring","title":"Monitoring","text":"

                                               Various xlators are enhanced to provide additional metrics that help in determining the effectiveness of the xlator in various workloads.

                                              These metrics can be dumped and visualized as detailed here.
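
                                               As a hedged example (the SIGUSR2 trigger and the /var/run/gluster/metrics/ dump location are assumptions based on the monitoring documentation referenced above; verify them there before relying on this), dumping and inspecting the metrics could look like:

                                               # Ask a running gluster client process to dump its metrics (assumed trigger: SIGUSR2)
                                               kill -USR2 $(pidof glusterfs)
                                               # Inspect the generated dump files (assumed location)
                                               ls -l /var/run/gluster/metrics/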

                                              "},{"location":"release-notes/4.1.0/#1-additional-metrics-added-to-negative-lookup-cache-xlator","title":"1. Additional metrics added to negative lookup cache xlator","text":"

                                              Metrics added are:

                                              • negative_lookup_hit_count
                                              • negative_lookup_miss_count
                                              • get_real_filename_hit_count
                                              • get_real_filename_miss_count
                                              • nameless_lookup_count
                                              • inodes_with_positive_dentry_cache
                                              • inodes_with_negative_dentry_cache
                                              • dentry_invalidations_recieved
                                              • cache_limit
                                              • consumed_cache_size
                                              • inode_limit
                                              • consumed_inodes
                                              "},{"location":"release-notes/4.1.0/#2-additional-metrics-added-to-md-cache-xlator","title":"2. Additional metrics added to md-cache xlator","text":"

                                              Metrics added are:

                                              • stat_cache_hit_count
                                              • stat_cache_miss_count
                                              • xattr_cache_hit_count
                                              • xattr_cache_miss_count
                                              • nameless_lookup_count
                                              • negative_lookup_count
                                              • stat_cache_invalidations_received
                                              • xattr_cache_invalidations_received
                                              "},{"location":"release-notes/4.1.0/#3-additional-metrics-added-to-quick-read-xlator","title":"3. Additional metrics added to quick-read xlator","text":"

                                              Metrics added are:

                                              • total_files_cached
                                              • total_cache_used
                                              • cache-hit
                                              • cache-miss
                                              • cache-invalidations
                                              "},{"location":"release-notes/4.1.0/#performance","title":"Performance","text":""},{"location":"release-notes/4.1.0/#1-support-for-fuse-writeback-cache","title":"1. Support for fuse writeback cache","text":"

                                               Gluster FUSE mounts support a FUSE extension to leverage the kernel "writeback cache".

                                               For usage help, see man 8 glusterfs and man 8 mount.glusterfs, specifically the options -kernel-writeback-cache and -attr-times-granularity.
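
                                               A hedged usage sketch (the mount option spelling below is an assumption derived from the option names above, and the values are illustrative; confirm the exact syntax in mount.glusterfs(8) before use):

                                               # Mount with the kernel writeback cache enabled and nanosecond timestamp granularity (illustrative)
                                               mount -t glusterfs -o kernel-writeback-cache=yes,attr-times-granularity=1000000000 server1:/myvol /mnt/myvol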

                                              "},{"location":"release-notes/4.1.0/#2-extended-eager-lock-to-metadata-transactions-in-replicate-xlator","title":"2. Extended eager-lock to metadata transactions in replicate xlator","text":"

                                               The eager lock feature in the replicate xlator is extended to support metadata transactions in addition to data transactions. This helps improve performance when there are frequent metadata updates in the workload. Such workloads are typically seen with sharded volumes by default, and in other workloads that incur a higher rate of metadata modifications to the same set of files.

                                               As a part of this feature, the compounded FOPs feature in AFR is deprecated; volumes that are configured to leverage compounding will start disregarding the option use-compound-fops.

                                              NOTE: This is an internal change in AFR xlator and is not user controlled or configurable.

                                              "},{"location":"release-notes/4.1.0/#3-support-for-multi-threaded-fuse-readers","title":"3. Support for multi-threaded fuse readers","text":"

                                               FUSE based mounts can specify the number of FUSE request processing threads during a mount. For workloads that have high concurrency on a single client, this helps process FUSE requests in parallel, rather than with the existing single-reader model.

                                              This is provided as a mount time option named reader-thread-count and can be used as follows,

                                               # mount -t glusterfs -o reader-thread-count=<n> <server>:<volname> <mntpoint>
                                              "},{"location":"release-notes/4.1.0/#4-configurable-aggregate-size-for-write-behind-xlator","title":"4. Configurable aggregate size for write-behind xlator","text":"

                                               The write-behind xlator provides the option performance.aggregate-size to enable configurable aggregate write sizes. This option enables the write-behind xlator to aggregate writes up to the specified size before the writes are sent to the bricks.

                                               Previously, this size was fixed at a maximum of 128KB per file. The configurable option provides the ability to tune it up or down based on the workload, to improve write performance.

                                              Usage:

                                               # gluster volume set <volname> performance.aggregate-size <size>
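
                                               For example, to raise the aggregate size on a hypothetical volume named myvol to 1MB:

                                               # gluster volume set myvol performance.aggregate-size 1MB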
                                              "},{"location":"release-notes/4.1.0/#5-adaptive-read-replica-selection-based-on-queue-length","title":"5. Adaptive read replica selection based on queue length","text":"

                                               The AFR xlator is enhanced with a new value for the option read-hash-mode. Setting this option to 3 distributes reads across AFR subvolumes based on which subvolume has the least outstanding read requests.

                                               This helps distribute reads better across subvolumes, and hence improves read performance in replicate based volumes.
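
                                               To select this behaviour on a volume (the volume name below is a placeholder; the option is exposed through the volume set interface as cluster.read-hash-mode):

                                               # gluster volume set myvol cluster.read-hash-mode 3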

                                              "},{"location":"release-notes/4.1.0/#standalone","title":"Standalone","text":""},{"location":"release-notes/4.1.0/#1-thin-arbiter-quorum-for-2-way-replication","title":"1. Thin arbiter quorum for 2-way replication","text":"

                                              NOTE: This feature is available only with GlusterD2

                                              Documentation for the feature is provided here.

                                              "},{"location":"release-notes/4.1.0/#2-automatically-configure-backup-volfile-servers-in-clients","title":"2. Automatically configure backup volfile servers in clients","text":"

                                              NOTE: This feature is available only with GlusterD2

                                               Clients connecting to and mounting a Gluster volume will automatically fetch and configure backup volfile servers for future volfile updates and fetches, to be used when the initial server used to fetch the volfile and mount is down.

                                              When using glusterd, this is achieved using the FUSE mount option backup-volfile-servers, and when using GlusterD2 this is done automatically.
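
                                               For example, with glusterd, a FUSE mount that can fall back to two additional volfile servers might look like this (host names and volume name are placeholders):

                                               # mount -t glusterfs -o backup-volfile-servers=server2:server3 server1:/myvol /mnt/myvol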

                                              "},{"location":"release-notes/4.1.0/#3-cmtime-equivalence-across-replicate-and-disperse-subvolumes","title":"3. (c/m)time equivalence across replicate and disperse subvolumes","text":"

                                               Enabling the utime feature enables Gluster to maintain consistent change and modification time stamps on files and directories across bricks.

                                               This feature is useful when applications are sensitive to time deltas between operations (for example, tar may report "file changed as we read it"), as it maintains and reports equal time stamps on a file across the subvolumes.

                                              To enable the feature use,

                                               # gluster volume set <volname> features.utime

                                              Limitations:

                                               • Mounting a gluster volume with time attribute options (noatime, relatime, ...) is not supported with this feature
                                               • Certain entry operations (with differing creation flags) would reflect an eventual consistency w.r.t. the time attributes
                                              • This feature does not guarantee consistent time for directories if hashed sub-volume for the directory is down
                                              • readdirp (or directory listing) is not supported with this feature
                                              "},{"location":"release-notes/4.1.0/#developer-related","title":"Developer related","text":""},{"location":"release-notes/4.1.0/#1-new-api-for-acquiring-leases-and-acting-on-lease-recalls","title":"1. New API for acquiring leases and acting on lease recalls","text":"

                                               A new API to acquire a lease on an open file, and to receive callbacks when the lease is recalled, is provided with gfapi.

                                              Refer to the header for details on how to use this API.

                                              "},{"location":"release-notes/4.1.0/#2-extended-language-bindings-for-gfapi-to-include-perl","title":"2. Extended language bindings for gfapi to include perl","text":"

                                              See, libgfapi-perl - Libgfapi bindings for Perl using FFI

                                              "},{"location":"release-notes/4.1.0/#major-issues","title":"Major issues","text":"

                                              None

                                              "},{"location":"release-notes/4.1.0/#bugs-addressed","title":"Bugs addressed","text":"

                                              Bugs addressed since release-4.0.0 are listed below.

                                              • #1074947: add option to build rpm without server
                                              • #1234873: glusterfs-resource-agents - volume - voldir is not properly set
                                              • #1272030: Remove lock recovery logic from client and server protocol translators
                                              • #1304962: Intermittent file creation fail,while doing concurrent writes on distributed volume has more than 40 bricks
                                              • #1312830: tests fail because bug-924726.t depends on netstat
                                              • #1319992: RFE: Lease support for gluster
                                              • #1450546: Paths to some tools are hardcoded to /sbin or /usr/sbin
                                              • #1450593: Gluster Python scripts do not check return value of find_library
                                              • #1468483: Sharding sends all application sent fsyncs to the main shard file
                                              • #1495153: xlator_t structure's 'client_latency' variable is not used
                                              • #1500649: Shellcheck errors in hook scripts
                                              • #1505355: quota: directories doesn't get heal on newly added bricks when quota is full on sub-directory
                                              • #1506140: Add quorum checks in post-op
                                              • #1507230: Man pages badly formatted
                                              • #1512691: PostgreSQL DB Restore: unexpected data beyond EOF
                                              • #1517260: Volume wrong size
                                              • #1521030: rpc: unregister programs before registering them again
                                              • #1523122: fix serval bugs found on testing protocol/client
                                              • #1523219: fuse xlator uses block size and fragment size 128KB leading to rounding off in df output
                                              • #1530905: Reducing regression time of glusterd test cases
                                              • #1533342: Syntactical errors in hook scripts for managing SELinux context on bricks
                                              • #1536024: Rebalance process is behaving differently for AFR and EC volume.
                                              • #1536186: build: glibc has removed legacy rpc headers and rpcgen in Fedora28, use libtirpc
                                              • #1537362: glustershd/glusterd is not using right port when connecting to glusterfsd process
                                              • #1537364: [RFE] - get-state option should mark profiling enabled flag at volume level
                                              • #1537457: DHT log messages: Found anomalies in (null) (gfid = 00000000-0000-0000-0000-000000000000). Holes=1 overlaps=0
                                              • #1537602: Georeplication tests intermittently fail
                                              • #1538258: build: python-ctypes only in RHEL <= 7
                                              • #1538427: Seeing timer errors in the rebalance logs
                                              • #1539023: Add ability to control verbosity settings while compiling
                                              • #1539166: [bitrot] scrub ondemand reports it's start as success without additional detail
                                              • #1539358: Changes to self-heal logic w.r.t. detecting of split-brains
                                              • #1539510: Optimize glusterd_import_friend_volume code path
                                              • #1539545: gsyncd is running gluster command to get config file path is not required
                                              • #1539603: Glusterfs crash when doing statedump with memory accounting is disabled
                                              • #1540338: Change op-version of master to 4.1.0 for future options that maybe added
                                              • #1540607: glusterd fails to attach brick during restart of the node
                                              • #1540669: Do lock conflict check correctly for wait-list
                                              • #1541038: A down brick is incorrectly considered to be online and makes the volume to be started without any brick available
                                              • #1541264: dht_layout_t leak in dht_populate_inode_for_dentry
                                              • #1541916: The used space in the volume increases when the volume is expanded
                                              • #1542318: dht_lookup_unlink_of_false_linkto_cbk fails with \"Permission denied\"
                                              • #1542829: Too many log messages about dictionary and options
                                              • #1543279: Moving multiple temporary files to the same destination concurrently causes ESTALE error
                                              • #1544090: possible memleak in glusterfsd process with brick multiplexing on
                                              • #1544600: 3.8 -> 3.10 rolling upgrade fails (same for 3.12 or 3.13) on Ubuntu 14
                                              • #1544699: Rolling upgrade to 4.0 is broken
                                              • #1544961: libgfrpc does not export IPv6 RPC methods even with --with-ipv6-default
                                              • #1545048: [brick-mux] process termination race while killing glusterfsd on last brick detach
                                              • #1545056: [CIOT] : Gluster CLI says \"io-threads : enabled\" on existing volumes post upgrade.
                                              • #1545891: Provide a automated way to update bugzilla status with patch merge.
                                              • #1546129: Geo-rep: glibc fix breaks geo-replication
                                              • #1546620: DHT calls dht_lookup_everywhere for 1xn volumes
                                              • #1546954: [Rebalance] \"Migrate file failed: : failed to get xattr [No data available]\" warnings in rebalance logs
                                              • #1547068: Bricks getting assigned to different pids depending on whether brick path is IP or hostname based
                                              • #1547128: Typo error in __dht_check_free_space function log message
                                              • #1547662: After a replace brick command, self-heal takes some time to start healing files on disperse volumes
                                              • #1547888: [brick-mux] incorrect event-thread scaling in server_reconfigure()
                                              • #1548361: Make afr_fsync a transaction
                                              • #1549000: line-coverage tests not capturing details properly.
                                              • #1549606: Eager lock should be present for both metadata and data transactions
                                              • #1549915: [Fuse Sub-dir] After performing add-brick on volume,doing rm -rf * on subdir mount point fails with \"Transport endpoint is not connected\"
                                              • #1550078: memory leak in pre-op in replicate volumes for every write
                                              • #1550339: glusterd leaks memory when vol status is issued
                                              • #1550895: GD2 fails to dlopen server xlator
                                              • #1550936: Pause/Resume of geo-replication with wrong user specified returns success
                                              • #1553129: Memory corruption is causing crashes, hangs and invalid answers
                                              • #1553598: [Rebalance] ENOSPC errors on few files in rebalance logs
                                              • #1553926: configure --without-ipv6-default has odd behaviour
                                              • #1553938: configure summary TIRPC result is misleading
                                              • #1554053: 4.0 clients may fail to convert iatt in dict when recieving the same from older (< 4.0) servers
                                              • #1554743: [EC] Read performance of EC volume exported over gNFS is significantly lower than write performance
                                              • #1555154: glusterd: TLS verification fails when using intermediate CA instead of self-signed certificates
                                              • #1555167: namespace test failure
                                              • #1557435: Enable lookup-optimize by default
                                              • #1557876: Fuse mount crashed with only one VM running with its image on that volume
                                              • #1557932: Shard replicate volumes don't use eager-lock affectively
                                              • #1558016: test ./tests/bugs/ec/bug-1236065.t is generating crash on build
                                              • #1558074: [disperse] Add tests for in-memory stripe cache for the non aligned write
                                              • #1558380: Modify glfsheal binary to accept socket file path as an optional argument.
                                              • #1559004: /var/log/glusterfs/bricks/export_vdb.log flooded with this error message \"Not able to add to index [Too many links]\"
                                              • #1559075: enable ownthread feature for glusterfs4_0_fop_prog
                                              • #1559126: Incorrect error message in /features/changelog/lib/src/gf-history-changelog.c
                                              • #1559130: ssh stderr in glusterfind gets swallowed
                                              • #1559235: Increase the inode table size on server when upcall enabled
                                              • #1560319: NFS client gets \"Invalid argument\" when writing file through nfs-ganesha with quota
                                              • #1560393: Fix regresssion failure for ./tests/basic/md-cache/bug-1418249.t
                                              • #1560411: fallocate created data set is crossing storage reserve space limits resulting 100% brick full
                                              • #1560441: volume stop in mgmt v3
                                              • #1560589: nl-cache.t fails
                                              • #1560957: After performing remove-brick followed by add-brick operation, brick went offline state
                                              • #1561129: When storage reserve limit is reached, appending data to an existing file throws EROFS error
                                              • #1561406: Rebalance failures on a dispersed volume with lookup-optimize enabled
                                              • #1562052: build: revert configure --without-ipv6-default behaviour
                                              • #1562717: SHD is not healing entries in halo replication
                                              • #1562907: set mgmt_v3_timer->timer to NULL after mgmt_v3_timer is deleted
                                              • #1563273: mark brick as online only when portmap registration is completed
                                              • #1563334: Honour cluster.localtime-logging option for all the daemons
                                              • #1563511: Redundant synchronization in rename codepath for a single subvolume DHT
                                              • #1563945: [EC] Turn ON the stripe-cache option by default for ec volume
                                              • #1564198: [Remove-brick] Many files were not migrated from the decommissioned bricks; commit results in data loss
                                              • #1564235: gfapi: fix a couple of minor issues
                                              • #1564600: Client can create denial of service (DOS) conditions on server
                                              • #1566067: Volume status inode is broken with brickmux
                                              • #1566207: Linux kernel untar failed with \"xz: (stdin): Read error: Invalid argument\" immediate after add-brick
                                              • #1566303: Removing directories from multiple clients throws ESTALE errors
                                              • #1566386: Disable choose-local in groups virt and gluster-block
                                              • #1566732: EIO errors on some operations when volume has mixed brick versions on a disperse volume
                                              • #1567209: Geo-rep: faulty session due to OSError: [Errno 95] Operation not supported
                                              • #1567880: Grant Deepshikha access to all CI-related infrastructure
                                              • #1567881: Halo replication I/O path is not working
                                              • #1568348: Rebalance on few nodes doesn't seem to complete - stuck at FUTEX_WAIT
                                              • #1568521: shard files present even after deleting vm from ovirt UI
                                              • #1568820: Add generated HMAC token in header for webhook calls
                                              • #1568844: [snapshot-scheduler]Prevent access of shared storage volume from the outside client
                                              • #1569198: bitrot scrub status does not show the brick where the object (file) is corrupted
                                              • #1569489: Need heal-timeout to be configured as low as 5 seconds
                                              • #1570011: test case is failing ./tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t while brick mux is enabled
                                              • #1570538: linux untar errors out at completion during disperse volume inservice upgrade
                                              • #1570962: print the path of the corrupted object in scrub status
                                              • #1571069: [geo-rep]: Lot of changelogs retries and \"dict is null\" errors in geo-rep logs
                                              • #1572076: Dictionary response is not captured in syncop_(f)xattrop
                                              • #1572581: Remove-brick failed on Distributed volume while rm -rf is in-progress
                                              • #1572586: dht: do not allow migration if file is open
                                              • #1573066: growing glusterd memory usage with connected RHGSWA
                                              • #1573119: Amends in volume profile option 'gluster-block'
                                              • #1573220: Memory leak in volume tier status command
                                              • #1574259: Errors unintentionally reported for snapshot status
                                              • #1574305: rm command hangs in fuse_request_send
                                              • #1574606: the regression test \"tests/bugs/posix/bug-990028.t\" fails
                                              • #1575294: lease recall callback should be avoided on closed
                                              • #1575386: GlusterFS 4.1.0 tracker
                                              • #1575707: Gluster volume smb share options are getting overwritten after restating the gluster volume
                                              • #1576814: GlusterFS can be improved
                                              • #1577162: gfapi: broken symbol versions
                                              • #1579674: Remove EIO from the dht_inode_missing macro
                                              • #1579736: Additional log messages in dht_readdir(p)_cbk
                                              • #1579757: DHT Log flooding in mount log \"key=trusted.glusterfs.dht.mds [Invalid argument]\"
                                              • #1580215: [geo-rep]: Lot of changelogs retries and \"dict is null\" errors in geo-rep logs
                                              • #1580540: make getfattr return proper response for \"glusterfs.gfidtopath\" xattr for files created when gfid2path was off
                                              • #1581548: writes succeed when only good brick is down in 1x3 volume
                                              • #1581745: bug-1309462.t is failing reliably due to changes in security.capability changes in the kernel
                                              • #1582056: Input/Output errors on a disperse volume with concurrent reads and writes
                                              • #1582063: rpc: The gluster auth version is always AUTH_GLUSTERFS_v2
                                              • #1582068: ctime: Rename and unlink does not update ctime
                                              • #1582072: posix/ctime: Access time is not updated for file with a hardlink
                                              • #1582080: posix/ctime: The first lookup on file is not healing the gfid
                                              • #1582199: posix unwinds readdirp calls with readdir signature
                                              • #1582286: Brick-mux regressions failing on 4.1 branch
                                              • #1582531: posix/ctime: Mtime is not updated on setting it to older date
                                              • #1582549: api: missing __THROW on pub function decls
                                              • #1583016: libgfapi: glfs init fails on afr volume with ctime feature enabled
                                              • #1583734: rpc_transport_unref() called for an unregistered socket fd
                                              • #1583769: Fix incorrect rebalance log message
                                              • #1584633: Brick process crashed after upgrade from RHGS-3.3.1 async(7.4) to RHGS-3.4(7.5)
                                              • #1585894: posix/ctime: EC self heal of directory is blocked with ctime feature enabled
                                              • #1587908: Fix deadlock in failure codepath of shard fsync
                                              • #1590128: xdata is leaking in server3_3_seek
                                              • "},{"location":"release-notes/4.1.1/","title":"Release notes for Gluster 4.1.1","text":"

                                                 This is a bugfix release. The release notes for 4.1.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                "},{"location":"release-notes/4.1.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                 This release contains a fix for a security vulnerability in Gluster as follows:

                                                • http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10841
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-10841

                                                 Installing the updated packages and restarting gluster services on gluster brick hosts will help prevent the security issue.

                                                "},{"location":"release-notes/4.1.1/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/4.1.1/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.0 are listed below.

                                                • #1590195: /usr/sbin/gcron.py aborts with OSError
                                                • #1591185: Gluster Block PVC fails to mount on Jenkins pod
                                                • #1593525: CVE-2018-10841 glusterfs: access trusted peer group via remote-host command [glusterfs upstream]
                                                "},{"location":"release-notes/4.1.10/","title":"Release notes for Gluster 4.1.10","text":"

                                                 This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4, 4.1.5, 4.1.6, 4.1.7, 4.1.8 and 4.1.9 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                "},{"location":"release-notes/4.1.10/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/4.1.10/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/4.1.10/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.9 are listed below.

                                                • #1721109: Failed to create volume which transport_type is \"tcp,rdma\"
                                                • #1729221: Upcall: Avoid sending upcalls for invalid Inode
                                                • #1729223: Ganesha-gfapi logs are flooded with error messages related to \"gf_uuid_is_null(gfid)) [Invalid argument]\" when lookups are running from multiple clients
                                                "},{"location":"release-notes/4.1.2/","title":"Release notes for Gluster 4.1.2","text":"

                                                 This is a bugfix release. The release notes for 4.1.0 and 4.1.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                "},{"location":"release-notes/4.1.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                                                 1. The release 4.1.0 notes incorrectly reported that all Python code in Gluster packages is Python 3 compliant; this is not the case, and the release note is amended accordingly.
                                                "},{"location":"release-notes/4.1.2/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/4.1.2/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.1 are listed below.

                                                • #1593536: ctime: Self heal of symlink is failing on EC subvolume
                                                • #1593537: posix/ctime: Mdata value of a directory is different across replica/EC subvolume
                                                • #1595524: rmdir is leaking softlinks to directories in .glusterfs
                                                • #1597116: afr: don't update readables if inode refresh failed on all children
                                                • #1597117: lookup not assigning gfid if file is not present in all bricks of replica
                                                • #1597229: glustershd crashes when index heal is launched before graph is initialized.
                                                • #1598193: Stale lock with lk-owner all-zeros is observed in some tests
                                                • #1599629: Don't execute statements after decrementing call count in afr
                                                • #1599785: _is_prefix should return false for 0-length strings
                                                • #1600941: [geo-rep]: geo-replication scheduler is failing due to unsuccessful umount
                                                • #1603056: When reserve limits are reached, append on an existing file after truncate operation results to hang
                                                • #1603099: directories are invisible on client side
                                                "},{"location":"release-notes/4.1.3/","title":"Release notes for Gluster 4.1.3","text":"

                                                 This is a bugfix release. The release notes for 4.1.0, 4.1.1, and 4.1.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                NOTE: Next minor release tentative date: Week of 24th September, 2018

                                                "},{"location":"release-notes/4.1.3/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/4.1.3/#major-issues","title":"Major issues","text":"
                                                 1. Bug #1601356, titled "Problem with SSL/TLS encryption", is not yet fixed with this release. A patch to fix it is in progress and can be tracked here.
                                                "},{"location":"release-notes/4.1.3/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.2 are listed below.

                                                • #1425326: gluster bash completion leaks TOP=0 into the environment
                                                • #1596686: key = trusted.glusterfs.protect.writes [Invalid argument]; key = glusterfs.avoid.overwrite [Invalid argument]
                                                • #1609550: glusterfs-resource-agents should not be built for el6
                                                • #1609551: glusterfs-resource-agents should not be built for el6
                                                • #1611104: [geo-rep]: Upgrade fails, session in FAULTY state
                                                • #1611106: Glusterd crashed on a few (master) nodes
                                                • #1611108: [geo-rep]: Geo-rep scheduler fails
                                                • #1611110: Glusterd memory leaking in gf_gld_mt_linebuf
                                                • #1611111: [geo-rep]: Geo-replication in FAULTY state - CENTOS 6
                                                • #1611113: [geo-rep]: Geo-replication not syncing renamed symlink
                                                • #1611114: [geo-rep]: [Errno 2] No such file or directory
                                                • #1611115: avoid possible glusterd crash in glusterd_verify_slave
                                                • #1611116: 'custom extended attributes' set on a directory are not healed after bringing back the down sub-volumes
                                                • #1618347: [Ganesha] Ganesha crashed in mdcache_alloc_and_check_handle while running bonnie and untars with parallel lookups
                                                "},{"location":"release-notes/4.1.4/","title":"Release notes for Gluster 4.1.4","text":"

                                                This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2 and 4.1.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                "},{"location":"release-notes/4.1.4/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"
                                                1. This release contains fixes for the following security vulnerabilities:

                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10904
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10907
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10911
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10913
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10914
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10923
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10926
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10927
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10928
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10929
                                                  • https://nvd.nist.gov/vuln/detail/CVE-2018-10930
                                                2. To resolve the security vulnerabilities, the following limitations were introduced in GlusterFS:

                                                  • open, read, and write on special files such as character and block devices are no longer permitted
                                                  • the io-stats xlator can dump stat info only to the /var/run/gluster directory

                                                Installing the updated packages and restarting the gluster services on the gluster brick hosts will fix the security issues.
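
                                                As an illustrative sketch (assuming an RPM-based distribution with systemd; the package name glusterfs-server and the exact procedure for restarting brick processes vary by distribution and deployment), the sequence on each brick host is roughly:

                                                # yum update glusterfs-server\n# systemctl restart glusterd\n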

                                                "},{"location":"release-notes/4.1.4/#major-issues","title":"Major issues","text":"
                                                1. Bug #1601356, titled \"Problem with SSL/TLS encryption\", is not yet fixed in this release. A patch to fix it is in progress and can be tracked here.
                                                "},{"location":"release-notes/4.1.4/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.3 are listed below.

                                                • #1625089: Improper deserialization in dict.c:dict_unserialize() can allow attackers to read arbitrary memory
                                                • #1625095: Files can be renamed outside volume
                                                • #1625096: I/O to arbitrary devices on storage server
                                                • #1625097: Stack-based buffer overflow in server-rpc-fops.c allows remote attackers to execute arbitrary code
                                                • #1625102: Information Exposure in posix_get_file_contents function in posix-helpers.c
                                                • #1625106: Unsanitized file names in debug/io-stats translator can allow remote attackers to execute arbitrary code
                                                "},{"location":"release-notes/4.1.5/","title":"Release notes for Gluster 4.1.5","text":"

                                                This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3 and 4.1.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                NOTE: Next minor release tentative date: Week of 19th November, 2018

                                                "},{"location":"release-notes/4.1.5/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/4.1.5/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/4.1.5/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.4 are listed below.

                                                • #1601356: Problem with SSL/TLS encryption on Gluster 4.0 & 4.1
                                                • #1625575: Prevent hangs while increasing replica-count/replace-brick for directory hierarchy
                                                • #1629548: Excessive logging in posix_set_parent_ctime()
                                                • #1630140: geo-rep: geo-rep config set fails to set rsync-options
                                                • #1630141: libgfchangelog: History API fails
                                                • #1630144: Geo-rep: Geo-rep regression times out occasionally
                                                • #1630145: Geo-rep: Few workers fails to start with out any failure
                                                "},{"location":"release-notes/4.1.6/","title":"Release notes for Gluster 4.1.6","text":"

                                                This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4 and 4.1.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                NOTE: Next minor release tentative date: Week of 20th January, 2019

                                                "},{"location":"release-notes/4.1.6/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                This release contains fixes for the following security vulnerabilities in Gluster:

                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14651
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14652
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14653
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14654
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14659
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14660
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14661
                                                "},{"location":"release-notes/4.1.6/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/4.1.6/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.5 are listed below.

                                                • #1632013: georep: hard-coded paths in gsyncd.conf.in
                                                • #1633479: 'df' shows half as much space on volume after upgrade to RHGS 3.4
                                                • #1633634: split-brain observed on parent dir
                                                • #1635979: Writes taking very long time leading to system hogging
                                                • #1635980: Low Random write IOPS in VM workloads
                                                • #1636218: [SNAPSHOT]: with brick multiplexing, snapshot restore will make glusterd send wrong volfile
                                                • #1637953: data-self-heal in arbiter volume results in stale locks.
                                                • #1641761: Spurious failures in bug-1637802-arbiter-stale-data-heal-lock.t
                                                • #1643052: Seeing defunt translator and discrepancy in volume info when issued from node which doesn't host bricks in that volume
                                                • #1643075: tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t failing
                                                • #1643929: geo-rep: gluster-mountbroker status crashes
                                                • #1644163: geo-rep: geo-replication gets stuck after file rename and gfid conflict
                                                • #1644474: afr/lease: Read child nodes from lease structure
                                                • #1644516: geo-rep: gluster-mountbroker status crashes
                                                • #1644518: [Geo-Replication] Geo-rep faulty sesion because of the directories are not synced to slave.
                                                • #1644524: Excessive logging in posix_update_utime_in_mdata
                                                • #1645363: CVE-2018-14652 glusterfs: Buffer overflow in \"features/locks\" translator allows for denial of service [fedora-all]
                                                • #1646200: CVE-2018-14654 glusterfs: \"features/index\" translator can create arbitrary, empty files [fedora-all]
                                                • #1646806: [Geo-rep]: Faulty geo-rep sessions due to link ownership on slave volume
                                                • #1647667: CVE-2018-14651 glusterfs: glusterfs server exploitable via symlinks to relative paths [fedora-all]
                                                • #1647668: CVE-2018-14661 glusterfs: features/locks translator passes an user-controlled string to snprintf without a proper format string resulting in a denial of service [fedora-all]
                                                • #1647669: CVE-2018-14659 glusterfs: Unlimited file creation via \"GF_XATTR_IOSTATS_DUMP_KEY\" xattr allows for denial of service [fedora-all]
                                                • #1647670: CVE-2018-14653 glusterfs: Heap-based buffer overflow via \"gf_getspec_req\" RPC message [fedora-all]
                                                • #1647972: CVE-2018-14660 glusterfs: Repeat use of \"GF_META_LOCK_KEY\" xattr allows for memory exhaustion [fedora-all]
                                                • #1648367: crash seen while running regression, intermittently.
                                                • #1648938: gfapi: fix bad dict setting of lease-id
                                                • #1648982: packaging: don't include bd.so in rpm when --without bd
                                                "},{"location":"release-notes/4.1.7/","title":"Release notes for Gluster 4.1.7","text":"

                                                This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4, 4.1.5 and 4.1.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                NOTE: Next minor release tentative date: Week of 20th March, 2019

                                                "},{"location":"release-notes/4.1.7/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/4.1.7/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/4.1.7/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.6 are listed below.

                                                • #1654118: [geo-rep]: Failover / Failback shows fault status in a non-root setup
                                                • #1654229: Provide an option to silence glfsheal logs
                                                • #1655527: Incorrect usage of local->fd in afr_open_ftruncate_cbk
                                                • #1655532: Tracker bug for all leases related issues
                                                • #1655561: gfid heal does not happen when there is no source brick
                                                • #1662635: Fix tests/bugs/shard/zero-flag.t
                                                • #1663132: [Ganesha] Ganesha failed on one node while exporting volumes in loop
                                                "},{"location":"release-notes/4.1.8/","title":"Release notes for Gluster 4.1.8","text":"

                                                This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4, 4.1.5, 4.1.6 and 4.1.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                NOTE: Next minor release tentative date: Week of 20th May, 2019

                                                "},{"location":"release-notes/4.1.8/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/4.1.8/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/4.1.8/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.7 are listed below.

                                                • #1670303: api: bad GFAPI_4.1.6 block
                                                • #1672249: quorum count value not updated in nfs-server vol file
                                                • #1673265: Fix timeouts so the tests pass on AWS
                                                • #1687746: [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
                                                • #1691292: glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
                                                • #1693057: dht_revalidate may not heal attrs on the brick root
                                                • #1693201: core: move \"dict is NULL\" logs to DEBUG log level
                                                "},{"location":"release-notes/4.1.9/","title":"Release notes for Gluster 4.1.9","text":"

                                                This is a bugfix release. The release notes for 4.1.0, 4.1.1, 4.1.2, 4.1.3, 4.1.4, 4.1.5, 4.1.6, 4.1.7 and 4.1.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 4.1 stable release.

                                                "},{"location":"release-notes/4.1.9/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/4.1.9/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/4.1.9/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.8 are listed below.

                                                • #1660225: geo-rep does not replicate mv or rename of file
                                                • #1684404: Multiple shd processes are running on brick_mux environmet
                                                • #1694563: gfapi: do not block epoll thread for upcall notifications
                                                • #1696513: Multiple shd processes are running on brick_mux environmet
                                                • #1707200: VM stuck in a shutdown because of a pending fuse request
                                                "},{"location":"release-notes/5.0/","title":"Release notes for Gluster 5.0","text":"

                                                This is a major release that includes a range of code improvements and stability fixes, along with a few features as noted below.

                                                A selection of the key features and changes is documented on this page. A full list of bugs that have been addressed is included further below.

                                                • Announcements
                                                • Major changes and features
                                                • Major issues
                                                • Bugs addressed in the release
                                                "},{"location":"release-notes/5.0/#announcements","title":"Announcements","text":"
                                                1. Releases that receive maintenance updates post release 5 are 4.1 (reference)

                                                NOTE: The 3.12 long term maintenance release will reach end of life (EOL) with the release of 5.0. (reference)

                                                2. Release 5 will receive maintenance updates around the 10th of every month for the first 3 months post release (i.e. Nov'18, Dec'18, Jan'19). After the initial 3 months, it will receive maintenance updates every 2 months until EOL. (reference)
                                                "},{"location":"release-notes/5.0/#major-changes-and-features","title":"Major changes and features","text":"

                                                Features are categorized into the following sections,

                                                • Management
                                                • Standalone
                                                "},{"location":"release-notes/5.0/#management","title":"Management","text":""},{"location":"release-notes/5.0/#glusterd2","title":"GlusterD2","text":"

                                                IMP: GlusterD2 in Gluster-5 is still considered a preview and is experimental. It should not be considered ready for production use. Users should still expect some breaking changes even though every effort will be made to avoid them. As GD2 is still under heavy development, new features can be expected throughout the Gluster 5 release.

                                                The following major changes have been committed to GlusterD2 since v4.1.0.

                                                1. Volume snapshots: Most snapshot operations are available, including create, delete, activate, deactivate, clone and restore.

                                                2. Volume heal: Support for full heal and index heal for replicate volumes has been implemented.

                                                3. Tracing with Opencensus: Support for tracing distributed operations has been implemented in GD2, using the Opencensus API. Tracing instrumentation has been done for volume create, list and delete operations. Other operations will follow subsequently.

                                                4. Portmap refactoring: Portmap in GlusterD2 no longer selects a port for the bricks to listen on, instead leaving the choice up to the bricks. Portmap only saves the port information provided by a brick during sign-in.

                                                5. Smartvol API merged with volume create API: The smart volume API, which allows a user to create a volume by specifying just a size, has been merged with the normal volume create API.

                                                6. Configure GlusterD2 with environment variables: In addition to CLI flags and the config file, GD2 configuration options can be set using environment variables.

                                                In addition to the above, many changes have been merged for minor bug-fixes and to help with testing.

                                                Refer to the user documentation section for details on how to get started with GlusterD2.

                                                "},{"location":"release-notes/5.0/#standalone","title":"Standalone","text":""},{"location":"release-notes/5.0/#1-entry-creation-and-handling-consistency-is-improved","title":"1. Entry creation and handling, consistency is improved","text":"

                                                The dentry serializer feature was introduced in Gluster 4.0 to strengthen the consistency handling of entry operations in the Gluster stack. Entry operations refer to the creation, linking, renaming and unlinking of file and directory names in the filesystem namespace.

                                                When this feature was first introduced (in 4.0) it was optional; with this release it is enabled by default.
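
                                                The feature is implemented by the sdfs xlator. As a hedged sketch, assuming the volume option key is features.sdfs (verify against gluster volume set help on your installation), its state can be inspected or toggled as follows:

                                                # gluster volume get <volname> features.sdfs\n# gluster volume set <volname> features.sdfs off\n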

                                                "},{"location":"release-notes/5.0/#2-python-code-in-gluster-packages-is-python-3-ready","title":"2. Python code in Gluster packages is Python 3 ready","text":""},{"location":"release-notes/5.0/#3-quota-fsck-script-to-correct-quota-accounting","title":"3. Quota fsck script to correct quota accounting","text":"

                                                See usage documentation here

                                                "},{"location":"release-notes/5.0/#4-added-noatime-option-in-utime-xlator","title":"4. Added noatime option in utime xlator","text":"

                                                Enabling the utime and ctime features enables Gluster to maintain consistent change and modification timestamps on files and directories across bricks.

                                                The utime xlator is enhanced with a noatime option, which is enabled by default when the utime feature is enabled. This helps ignore atime updates for operations that may otherwise trigger an atime update on the file system objects.

                                                To enable the feature use,

                                                # gluster volume set <volname> features.utime on\n# gluster volume set <volname> features.ctime on\n
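
                                                To confirm the resulting values, the options can be queried with the standard volume get command (an illustrative check, using the same keys as above):

                                                # gluster volume get <volname> features.utime\n# gluster volume get <volname> features.ctime\n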
                                                "},{"location":"release-notes/5.0/#5-added-ctime-invalidation-option-in-quick-read-xlator","title":"5. Added ctime-invalidation option in quick-read xlator","text":"

                                                The quick-read xlator by default uses mtime (a file's last modification time) to identify changes to file data. However, there are applications, like rsync, which explicitly set mtime, making it unreliable for identifying changes to file content.

                                                Since ctime (a file's last status change time) also changes when the content of a file changes and cannot be set explicitly by applications, it is a more reliable source for identifying staleness of cached data.

                                                The ctime-invalidation option makes quick-read prefer ctime over mtime when validating the staleness of its cache.

                                                To enable this option use,

                                                # gluster volume set <volname> ctime-invalidation on\n

                                                NOTE: Using ctime can result in false positives, as ctime is updated even on attribute changes (such as mode bits) without changes to file data. As a result, this option is recommended in situations where mtime is not reliable.
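
                                                To confirm the current setting, the full option list can be filtered (an illustrative check; the grep is a convenience, not part of the original instructions):

                                                # gluster volume get <volname> all | grep ctime-invalidation\n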

                                                "},{"location":"release-notes/5.0/#6-added-shard-deletion-rate-option-in-shard-xlator","title":"6. Added shard-deletion-rate option in shard xlator","text":"

                                                The shard-deletion-rate option is introduced to configure the number of shards to delete in parallel when a sharded file is deleted.

                                                The default value is set at 100, but can be increased to delete more shards in parallel for faster space reclamation.

                                                To change the defaults for this option use,

                                                # gluster volume set <volname> shard-deletion-rate <n>\n

                                                NOTE: The upper limit is unbounded; use it with caution, as a very large number will cause lock contention on the bricks. As an example, during testing, an upper limit of 125000 was enough to cause timeouts and hangs in the gluster processes due to lock contention.
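
                                                For example, to allow 200 shards to be deleted in parallel (200 is an illustrative value only; choose a value suited to your workload with the note above in mind):

                                                # gluster volume set <volname> shard-deletion-rate 200\n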

                                                "},{"location":"release-notes/5.0/#7-removed-last-usage-of-md5-digest-in-code-towards-better-fips-compliance","title":"7. Removed last usage of MD5 digest in code, towards better FIPS compliance","text":"

                                                In an effort to ensure that Gluster can be installed and deployed on machines that are compliant with the requirements for FIPS, the remaining uses of the MD5 digest have been removed from the code base.

                                                Addressing this feature's requirements was initiated during the 4.0 release; at that point, enabling user space snapshots, which still used MD5 for certain needs, broke the FIPS compliance requirements. This limitation is now addressed in this release.

                                                "},{"location":"release-notes/5.0/#8-code-improvements","title":"8. Code improvements","text":"

                                                Over the course of this release, contributors have been active in addressing various Coverity issues, GCC and clang warnings, clang formatting of the code base, micro-improvements to glibc API usage, and memory handling around string handling and allocation routines.

                                                The above are ongoing efforts, but major strides were made during this release to actively address code quality in these areas.

                                                "},{"location":"release-notes/5.0/#major-issues","title":"Major issues","text":"
                                                1. The following options are removed from the code base and need to be unset before an upgrade from releases older than release 4.1.0:

                                                  • features.lock-heal
                                                  • features.grace-timeout

                                                To check if these options are set use,

                                                # gluster volume info\n

                                                and ensure that the above options are not part of the Options Reconfigured: section in the output for any volume in the cluster.
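
                                                A quick way to scan all volumes at once (an illustrative convenience; the grep pattern is not part of the original instructions):

                                                # gluster volume info | grep -E 'lock-heal|grace-timeout'\n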

                                                If these are set, then unset them using the following commands,

                                                # gluster volume reset <volname> <option>\n
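
                                                For example, for the two options named above:

                                                # gluster volume reset <volname> features.lock-heal\n# gluster volume reset <volname> features.grace-timeout\n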

                                                NOTE: Failure to do the above may result in failures during online upgrades; these options must be reset to their defaults prior to upgrading the cluster.

                                                "},{"location":"release-notes/5.0/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-4.1.0 are listed below.

                                                • #853601: working-directory should be protected from being a brick
                                                • #1312832: tests fail because bug-924726.t depends on netstat
                                                • #1390050: Elasticsearch get CorruptIndexException errors when running with GlusterFS persistent storage
                                                • #1405147: glusterfs (posix-acl xlator layer) checks for \"write permission\" instead for \"file owner\" during open() when writing to a file
                                                • #1425325: gluster bash completion leaks TOP=0 into the environment
                                                • #1437780: don't send lookup in fuse_getattr()
                                                • #1455872: [Perf]: 25% regression on sequential reads on EC over SMB3
                                                • #1492847: core (named threads): flood of -Wformat-truncation warnings with gcc-7.
                                                • #1512691: PostgreSQL DB Restore: unexpected data beyond EOF
                                                • #1524323: No need to load ctr xlator if user has not configured tiering
                                                • #1526780: ./run-tests-in-vagrant.sh fails because of disabled Gluster/NFS
                                                • #1533000: Quota crawl regressed
                                                • #1537602: Georeplication tests intermittently fail
                                                • #1543279: Moving multiple temporary files to the same destination concurrently causes ESTALE error
                                                • #1545048: [brick-mux] process termination race while killing glusterfsd on last brick detach
                                                • #1546103: run-tests-in-vagrant.sh should return test status
                                                • #1558574: Coverity: Warning for singlton array..
                                                • #1558921: Gluster volume smb share options are getting overwritten after restating the gluster volume
                                                • #1561332: merge ssl infra with epoll infra
                                                • #1564071: directories are invisible on client side
                                                • #1564149: Agree upon a coding standard, and automate check for this in smoke
                                                • #1564419: Client side memory leak in encryption xlator (crypt.c).
                                                • #1568521: shard files present even after deleting vm from ovirt UI
                                                • #1569345: Need COMMITMENT from community for GPL Cure.
                                                • #1569399: glusterfsd should be able to start without any other arguments than a single volfile.
                                                • #1570538: linux untar errors out at completion during disperse volume inservice upgrade
                                                • #1570962: print the path of the corrupted object in scrub status
                                                • #1574421: Provide a way to get the hashed-subvol for a file
                                                • #1575381: gluster volume heal info prints extra newlines
                                                • #1575490: [geo-rep]: Upgrade fails, session in FAULTY state
                                                • #1575587: Leverage MDS subvol for dht_removexattr also
                                                • #1575716: gfapi: broken symbol versions
                                                • #1575742: Change op-version of master to 4.2.0 for future options that maybe added
                                                • #1575858: quota crawler fails w/ TLS enabled
                                                • #1575864: glusterfsd crashing because of RHGS WA?
                                                • #1575887: Additional log messages in dht_readdir(p)_cbk
                                                • #1575910: DHT Log flooding in mount log \"key=trusted.glusterfs.dht.mds [Invalid argument]\"
                                                • #1576179: [geo-rep]: Geo-rep scheduler fails
                                                • #1576392: Glusterd crashed on a few (master) nodes
                                                • #1576418: Warning messages generated for the removal of extended attribute security.ima flodding client logs
                                                • #1576767: [geo-rep]: Lot of changelogs retries and \"dict is null\" errors in geo-rep logs
                                                • #1576842: cloudsync: make plugins configurable
                                                • #1577574: brick crash seen while creating and deleting two volumes in loop
                                                • #1577627: [Geo-rep]: Status in ACTIVE/Created state
                                                • #1577672: Brick-mux regressions failing for over 8+ weeks on master
                                                • #1577731: [Ganesha] \"Gluster nfs-ganesha enable\" commands sometimes gives output as \"failed\" with \"Unlocking failed\" error messages ,even though cluster is up and healthy in backend
                                                • #1577744: The tool to generate new xlator template code is not upto date
                                                • #1578325: Input/Output errors on a disperse volume with concurrent reads and writes
                                                • #1578650: If parallel-readdir is enabled, the readdir-optimize option even when it is set to on it behaves as off
                                                • #1578721: Statedump prints memory usage statistics twice
                                                • #1578823: Remove EIO from the dht_inode_missing macro
                                                • #1579276: rpc: The gluster auth version is always AUTH_GLUSTERFS_v2
                                                • #1579769: inode status command is broken with distributed replicated volumes
                                                • #1579786: Thin-arbiter: Provide script to start and run thin arbiter process
                                                • #1579788: Thin-arbiter: Have the state of volume in memory
                                                • #1580020: ctime: Rename and unlink does not update ctime
                                                • #1580238: Fix incorrect rebalance log message
                                                • #1580269: [Remove-brick+Rename] Failure count shows zero though there are file migration failures
                                                • #1580352: Glusterd memory leaking in gf_gld_mt_linebuf
                                                • #1580529: posix/ctime: Access time is not updated for file with a hardlink
                                                • #1580532: posix/ctime: The first lookup on file is not healing the gfid
                                                • #1581035: posix/ctime: Mtime is not updated on setting it to older date
                                                • #1581345: posix unwinds readdirp calls with readdir signature
                                                • #1581735: bug-1309462.t is failing reliably due to changes in security.capability changes in the kernel
                                                • #1582051: Fix failure of readdir-ahead/bug-1439640.t in certain cases
                                                • #1582516: libgfapi: glfs init fails on afr volume with ctime feature enabled
                                                • #1582704: rpc_transport_unref() called for an unregistered socket fd
                                                • #1583018: changelog: Changelog is not capturing rename of files
                                                • #1583565: [distribute]: Excessive 'dict is null' errors in geo-rep logs
                                                • #1583583: \"connecting\" state in protocol client is useless
                                                • #1583937: Brick process crashed after upgrade from RHGS-3.3.1 async(7.4) to RHGS-3.4(7.5)
                                                • #1584098: 'custom extended attributes' set on a directory are not healed after bringing back the down sub-volumes
                                                • #1584483: afr: don't update readables if inode refresh failed on all children
                                                • #1584517: Inconsistent access permissions on directories after bringing back the down sub-volumes
                                                • #1584864: sometime messages
                                                • #1584981: posix/ctime: EC self heal of directory is blocked with ctime feature enabled
                                                • #1585391: glusteshd wrong status caused by gluterd big lock
                                                • #1585585: Cleanup \"connected\" state management of rpc-clnt
                                                • #1586018: (f)Setxattr and (f)removexattr invalidates the stat cache in md-cache
                                                • #1586020: [GSS] Pending heals are not getting completed in CNS environment
                                                • #1586342: Refactor the distributed test code to make it work for ipv4
                                                • #1586363: Refactor rebalance code
                                                • #1589253: After creating and starting 601 volumes, self heal daemon went down and seeing continuous warning messages in glusterd log
                                                • #1589691: xdata is leaking in server3_3_seek
                                                • #1589782: [geo-rep]: Geo-replication in FAULTY state - CENTOS 6
                                                • #1589842: [USS] snapview server does not go through the list of all the snapshots for validating a snap
                                                • #1590193: /usr/sbin/gcron.py aborts with OSError
                                                • #1590385: Refactor dht lookup code
                                                • #1590655: Excessive logging in posix_check_internal_writes() due to NULL dict
                                                • #1590710: Gluster Block PVC fails to mount on Jenkins pod
                                                • #1591193: lookup not assigning gfid if file is not present in all bricks of replica
                                                • #1591580: Remove code duplication in protocol/client
                                                • #1591621: Arequal checksum mismatch on older mount
                                                • #1592141: Null pointer deref in error paths
                                                • #1592275: posix/ctime: Mdata value of a directory is different across replica/EC subvolume
                                                • #1592509: ctime: Self heal of symlink is failing on EC subvolume
                                                • #1593232: CVE-2018-10841 glusterfs: access trusted peer group via remote-host command [glusterfs upstream]
                                                • #1593351: mount.glusterfs incorrectly reports \"getfattr not found\"
                                                • #1593548: Stack overflow in readdirp with parallel-readdir enabled
                                                • #1593562: Add new peers to Glusto
                                                • #1593651: gnfs nfs.register-with-portmap issue with ipv6_default
                                                • #1595174: Found an issue on using lock before init in md-cache
                                                • #1595190: rmdir is leaking softlinks to directories in .glusterfs
                                                • #1595320: gluster wrongly reports bricks online, even when brick path is not available
                                                • #1595492: tests: remove tarissue.t from BAD_TEST
                                                • #1595726: tests/geo-rep: Add test case for symlink rename
                                                • #1596020: Introduce database group profile
                                                • #1596513: glustershd crashes when index heal is launched before graph is initialized.
                                                • #1596524: 'replica 3 aribiter 1' is not a industry standard way of telling 2-way replicate with arbiter.
                                                • #1596789: Update mount-shared-storage.sh to automatically include all enabled glusterfs mounts in fstab
                                                • #1597156: Need a simpler way to find if a replica/ec subvolume is up
                                                • #1597247: restart all the daemons after all the bricks
                                                • #1597473: introduce cluster.daemon-log-level option
                                                • #1597512: Remove contrib/ipaddr-py
                                                • #1597540: tests/geo-rep: Add test cases for rsnapshot use case
                                                • #1597563: [geo-rep+tiering]: Hot and Cold tier brick changelogs report rsync failure
                                                • #1597568: Mark brick online after port registration even for brick-mux cases
                                                • #1597627: tests/bugs/core/bug-1432542-mpx-restart-crash.t is generated crash
                                                • #1597662: Stale entries of snapshots need to be removed from /var/run/gluster/snaps
                                                • #1597776: br-state-check.t crashed while brick multiplex is enabled
                                                • #1597805: Stale lock with lk-owner all-zeros is observed in some tests
                                                • #1598325: Replace the BROKEN_TESTS environment variable value
                                                • #1598345: gluster get-state command is crashing glusterd process when geo-replication is configured
                                                • #1598390: Remove extras/prot_filter.py
                                                • #1598548: Disabling iostats diagnostics.stats-dump-interval (set to 0) does not terminate the dump thread
                                                • #1598663: Don't execute statements after decrementing call count in afr
                                                • #1598884: [geo-rep]: [Errno 2] No such file or directory
                                                • #1598926: Misleading error messages on bricks caused by lseek
                                                • #1598977: [geo-rep]: geo-replication scheduler is failing due to unsuccessful umount
                                                • #1599219: configure fails complaining absence of libxml2-devel
                                                • #1599250: bug-1432542-mpx-restart-crash.t takes a lot of time to complete cleanup
                                                • #1599628: To find a compatible brick ignore diagnostics.brick-log-level option while brick mux is enabled
                                                • #1599783: _is_prefix should return false for 0-length strings
                                                • #1600405: [geo-rep]: Geo-replication not syncing renamed symlink
                                                • #1600451: crash on glusterfs_handle_brick_status of the glusterfsd
                                                • #1600687: fuse process segfault when use resolve-gids option
                                                • #1600812: A new volume set option to for GD2 quota integration
                                                • #1600878: crash seen while running regression, intermittently.
                                                • #1600963: get the failed test details into gerrit output itself
                                                • #1601166: performance.read-ahead causes huge increase in unnecessary network traffic
                                                • #1601390: Distributed testing: Fix build environment
                                                • #1601423: memory leak in get-state when geo-replication session is configured
                                                • #1601683: dht: remove useless argument from dht_iatt_merge
                                                • #1602070: [SNAPSHOT] snapshot daemon crashes if a fd from a deleted snapshot is accessed
                                                • #1602121: avoid possible glusterd crash in glusterd_verify_slave
                                                • #1602236: When reserve limits are reached, append on an existing file after truncate operation results to hang
                                                • #1602866: dht: Crash seen in thread dht_dir_attr_heal
                                                • #1603063: ./tests/bugs/glusterd/validating-server-quorum.t is generated core
                                                • #1605056: [RHHi] Mount hung and not accessible
                                                • #1605077: If a node disconnects during volume delete, it assumes deleted volume as a freshly created volume when it is back online
                                                • #1607049: Excessive logging in posix_set_parent_ctime()
                                                • #1607319: Remove uuid from contrib/
                                                • #1607689: Memory leaks on glfs_fini
                                                • #1607783: Segmentation fault while using gfapi while getting volume utilization
                                                • #1608175: Skip hash checks in dht_readdirp_cbk if dht has a single child subvol.
                                                • #1608564: line coverage tests failing consistently over a week
                                                • #1608566: line coverage tests: glusterd crash in ./tests/basic/sdfs-sanity.t
                                                • #1608568: line coverage tests: bug-1432542-mpx-restart-crash.t times out consistently
                                                • #1608684: Change glusto ownership to reflect current reality
                                                • #1608991: Remove code duplication in socket
                                                • #1609126: Fix mem leak and smoke failure for gcc8 in cloudsync
                                                • #1609207: thin arbiter: set notify-contention option to yes
                                                • #1609337: Remove argp-standalone from contrib/
                                                • #1609551: glusterfs-resource-agents should not be built for el6
                                                • #1610236: [Ganesha] Ganesha crashed in mdcache_alloc_and_check_handle while running bonnie and untars with parallel lookups
                                                • #1610256: [Ganesha] While performing lookups from two of the clients, \"ls\" command got failed with \"Invalid argument\"
                                                • #1610405: Geo-rep: Geo-rep regression times out occasionally
                                                • #1610726: Fuse mount of volume fails when gluster_shared_storage is enabled
                                                • #1611103: online_brick_count check in volume.rc should ignore bitrot and scrubber daemons
                                                • #1611566: tests/bitrot: tests/bitrot/bug-1373520.t fails intermittently
                                                • #1611692: Mount process crashes on a sharded volume during rename when dst doesn't exist
                                                • #1611834: glusterfsd crashes when SEEK_DATA/HOLE is not supported
                                                • #1612017: MAINTAINERS: Add Xavier Hernandez as peer for shard xlator
                                                • #1612037: Entry will be present even if the gfid link creation inside .glusterfs fails
                                                • #1612054: Test case bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t failure
                                                • #1612418: Brick not coming up on a volume after rebooting the node
                                                • #1612750: gfapi: Use inode_forget in case of unlink/rename objects
                                                • #1613098: posix-acl: skip acl_permits check when the owner setting GF_POSIX_ACL_xxxx
                                                • #1613807: Fix spurious failures in tests/basic/afr/granular-esh/replace-brick.t
                                                • #1614062: Provide/preserve tarball of retried tests
                                                • #1614088: kill_brick function needs to wait for brick to be killed
                                                • #1614124: glusterfsd process crashed in a multiplexed configuration during cleanup of a single brick-graph triggered by volume-stop.
                                                • #1614142: Fix the grammar error in the rpc log
                                                • #1614168: [uss]snapshot: posix acl authentication is not working as expected
                                                • #1614654: Potential fixes for tests/basic/afr/add-brick-self-heal.t failure
                                                • #1614662: ./tests/bugs/replicate/bug-1448804-check-quorum-type-values.t
                                                • #1614718: Fix spurious failures in tests/bugs/index/bug-1559004-EMLINK-handling.t
                                                • #1614730: Test case bug-1433571-undo-pending-only-on-up-bricks.t failure
                                                • #1614799: Geo-rep: Few workers fails to start with out any failure
                                                • #1615037: Multiplex tests use a cleanup pattern that results in empty tarballs on failure
                                                • #1615078: tests/bugs/replicate/bug-1408712.t fails.
                                                • #1615092: tests/bugs/shard/configure-lru-limit.t spurious failure
                                                • #1615096: ./tests/bugs/quick-read/bug-846240.t fails spuriously
                                                • #1615239: Fix ./tests/basic/afr/replace-brick-self-heal.t failure
                                                • #1615331: gfid-mismatch-resolution-with-fav-child-policy.t is failing
                                                • #1615474: Rebalance status shows wrong count of \"Rebalanced-files\" if the file has hardlinks
                                                • #1615582: test: ./tests/basic/stats-dump.t fails spuriously not finding queue_size in stats output for some brick
                                                • #1615703: [Disperse] Improve log messages for EC volume
                                                • #1615789: Come up with framework to test thin-arbiter
                                                • #1618004: [GSS] glusterd not starting after upgrade due to snapshots error in RHEV + RHGS
                                                • #1619027: geo-rep: Active/Passive status change logging is redundant
                                                • #1619423: cli: Command gluster volume statedump <volname> dumps core
                                                • #1619475: NetBSD memory detection issue
                                                • #1619720: posix_mknod does not update trusted.pgfid.xx xattr correctly
                                                • #1619843: Snapshot status fails with commit failure
                                                • #1620544: Brick process NOT ONLINE for heketidb and block-hosting volume
                                                • #1621981: dht: File rename removes the .glusterfs handle for linkto file
                                                • #1622076: [geo-rep]: geo-rep reverse sync in FO/FB can accidentally delete the content at original master incase of gfid conflict in 3.4.0 without explicit user rmdir
                                                • #1622422: glusterd cli is showing brick status N/A even brick is consumed by a brick process
                                                • #1622549: libgfchangelog: History API fails
                                                • #1622665: clang-scan report: glusterfs issues
                                                • #1622821: Prevent hangs while increasing replica-count/replace-brick for directory hierarchy
                                                • #1623408: rpc: log fuse request ID with gluster transaction ID
                                                • #1623759: [Disperse] Don't send final version update if non data fop succeeded
                                                • #1624244: DHT: Rework the virtual xattr to get the hash subvol
                                                • #1624440: Fail volume stop operation in case brick detach request fails
                                                • #1625089: CVE-2018-10911 glusterfs: Improper deserialization in dict.c:dict_unserialize() can allow attackers to read arbitrary memory
                                                • #1625095: CVE-2018-10930 glusterfs: Files can be renamed outside volume
                                                • #1625096: CVE-2018-10923 glusterfs: I/O to arbitrary devices on storage server
                                                • #1625097: CVE-2018-10907 glusterfs: Stack-based buffer overflow in server-rpc-fops.c allows remote attackers to execute arbitrary code
                                                • #1625102: CVE-2018-10913 glusterfs: Information Exposure in posix_get_file_contents function in posix-helpers.c
                                                • #1625106: CVE-2018-10904 glusterfs: Unsanitized file names in debug/io-stats translator can allow remote attackers to execute arbitrary code
                                                • #1625643: Use CALLOC in dht_layouts_init
                                                • #1626319: DH ciphers disabled errors are encountered on basic mount & unmount with ssl enabled setup
                                                • #1626346: dht: Use snprintf in dht_filter_loc_subvol_key
                                                • #1626394: dht_create: Create linkto files if required when using dht_filter_loc_subvol_key
                                                • #1626787: sas workload job getting stuck after sometime
                                                • #1627044: Converting to replica 2 volume is not throwing warning
                                                • #1627620: SAS job aborts complaining about file doesn't exist
                                                • #1628668: Update op-version from 4.2 to 5.0
                                                • #1629877: GlusterFS can be improved (clone for Gluster-5)
                                                • #1630673: geo-rep: geo-rep config set fails to set rsync-options
                                                • #1630804: libgfapi-python: test_listdir_with_stat and test_scandir failure on release 5 branch
                                                • #1633015: ctime: Access time is different with in same replica/EC volume
                                                • #1633242: 'df' shows half as much space on volume after upgrade to RHGS 3.4
                                                • #1633552: glusterd crash in regression build
                                                • #1635373: ASan (address sanitizer) fixes - Blanket bug
                                                • #1635972: Low Random write IOPS in VM workloads
                                                • #1635975: Writes taking very long time leading to system hogging
                                                • #1636162: [SNAPSHOT]: with brick multiplexing, snapshot restore will make glusterd send wrong volfile
                                                • #1636842: df shows Volume size as zero if Volume created and mounted using Glusterd2
                                                • #1638159: data-self-heal in arbiter volume results in stale locks.
                                                • #1638163: split-brain observed on parent dir
                                                • #1639688: core: backport uuid fixes
                                                • #1640392: io-stats: garbage characters in the filenames generated
                                                "},{"location":"release-notes/5.1/","title":"Release notes for Gluster 5.1","text":"

                                                This is a bugfix release. The release notes for 5.0 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                NOTE: Next minor release tentative date: Week of 10th December, 2018

                                                "},{"location":"release-notes/5.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                This release contains fixes for several security vulnerabilities in Gluster as follows,

                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14651
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14652
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14653
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14654
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14659
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14660
                                                • https://nvd.nist.gov/vuln/detail/CVE-2018-14661
                                                "},{"location":"release-notes/5.1/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.1/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.0 are listed below.

                                                • #1641429: Gfid mismatch seen on shards when lookup and mknod are in progress at the same time
                                                • #1641440: [ovirt-gluster] Mount hung and not accessible
                                                • #1641872: Spurious failures in bug-1637802-arbiter-stale-data-heal-lock.t
                                                • #1643078: tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t failing
                                                • #1643402: [Geo-Replication] Geo-rep faulty sesion because of the directories are not synced to slave.
                                                • #1644158: geo-rep: geo-replication gets stuck after file rename and gfid conflict
                                                • #1644161: cliutils: geo-rep cliutils' usage of Popen is not python3 compatible
                                                • #1644314: build/packaging: el-X (x > 7) isms
                                                • #1644514: geo-rep: On gluster command failure on slave, worker crashes with python3
                                                • #1644515: geo-rep: gluster-mountbroker status crashes
                                                • #1644526: Excessive logging in posix_update_utime_in_mdata
                                                • #1644622: [Stress] : Mismatching iatt in glustershd logs during MTSH and continous IO from Ganesha mounts
                                                • #1644645: [AFR] : Start crawling indices and healing only if both data bricks are UP in replica 2 (thin-arbiter)
                                                • #1646204: CVE-2018-14654 glusterfs: \"features/index\" translator can create arbitrary, empty files [fedora-all]
                                                • #1646896: [Geo-Replication] Geo-rep faulty sesion because of the directories are not synced to slave.
                                                • #1647663: CVE-2018-14651 glusterfs: glusterfs server exploitable via symlinks to relative paths [fedora-all]
                                                • #1647664: CVE-2018-14653 glusterfs: Heap-based buffer overflow via \"gf_getspec_req\" RPC message [fedora-all]
                                                • #1647665: CVE-2018-14659 glusterfs: Unlimited file creation via \"GF_XATTR_IOSTATS_DUMP_KEY\" xattr allows for denial of service [fedora-all]
                                                • #1647666: CVE-2018-14661 glusterfs: features/locks translator passes an user-controlled string to snprintf without a proper format string resulting in a denial of service [fedora-all]
                                                • #1647801: can't enable shared-storage
                                                • #1647962: CVE-2018-14660 glusterfs: Repeat use of \"GF_META_LOCK_KEY\" xattr allows for memory exhaustion [fedora-all]
                                                • #1647968: Seeing defunt translator and discrepancy in volume info when issued from node which doesn't host bricks in that volume
                                                • #1648923: gfapi: fix bad dict setting of lease-id
                                                "},{"location":"release-notes/5.10/","title":"Release notes for Gluster 5.10","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6, 5.8 and 5.9 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                Next minor release tentative date: Week of 10th December, 2019

                                                "},{"location":"release-notes/5.10/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.10/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.10/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.9 are listed below.

                                                • #1749352: Failures in remove-brick due to [Input/output error] errors
                                                • #1750230: [geo-rep]: Non-root - Unable to set up mountbroker root directory and group
                                                • #1739336: Multiple disconnect events being propagated for the same child
                                                "},{"location":"release-notes/5.11/","title":"Release notes for Gluster 5.11","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6, 5.8, 5.9, and 5.10 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                Next minor release tentative date: Week of 10th February, 2020

                                                "},{"location":"release-notes/5.11/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.11/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.11/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.10 are listed below.

                                                • #1718734: Memory leak in glusterfsd process
                                                • #1760710: glustershd can not decide heald_sinks, and skip repair, so some entries lingering in volume heal info
                                                • #1767305: READDIRP incorrectly updates posix-acl inode ctx
                                                • #1779284: Backport GNFS memory leak fix to version 5
                                                "},{"location":"release-notes/5.12/","title":"Release notes for Gluster 5.12","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6, 5.8, 5.9, 5.10, and 5.11 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                Next minor release tentative date: Week of 10th April, 2020

                                                "},{"location":"release-notes/5.12/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.12/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.12/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.11 are listed below.

                                                • #1803810: Functionality to enable log rotation for user serviceable snapshot's logs.
                                                • #1804512: Mounts fails after reboot of 1/3 gluster nodes
                                                • #1804522: Rebalance is causing glusterfs crash on client node
                                                • #1805047: I/O error on writes to a disperse volume when replace-brick is executed
                                                • #1805049: Glusterfsd crashing in ec-inode-write.c, in GF_ASSERT
                                                • #1805050: [Disperse] : Client side heal is not removing dirty flag for some of the files.
                                                • #1805051: Disperse volume : data corruption with ftruncate data in 4+2 config
                                                • #1805052: Disperse volume : Ganesha crash with IO in 4+2 config when one glusterfsd restart every 600s
                                                • #1805053: An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
                                                • #1805054: Disperse volume : data corruption with ftruncate data in 4+2 config
                                                • #1805055: Open fd heal should filter O_APPEND/O_EXCL
                                                • #1805056: Disperse volume : data corruption with ftruncate data in 4+2 config
                                                • #1805057: [EC] shd crashed while heal failed due to out of memory error.
                                                "},{"location":"release-notes/5.13/","title":"Release notes for Gluster 5.13","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6, 5.8, 5.9, 5.10, 5.11, and 5.12 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                NOTE: This is supposed to be last minor release of 5.

                                                "},{"location":"release-notes/5.13/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.13/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.13/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.12 are listed below.

                                                • #1803810: Functionality to enable log rotation for user serviceable snapshot's logs.
                                                • #1127: Mount crash during background shard cleanup
• #1103: afr: prevent spurious entry heals leading to gfid split-brain
• #1067: Metadata heal picks different brick each time as source if there are no pending xattrs
• #1028: Segfault in FUSE process, potential use after free
                                                • #1390914: Glusterfs create a flock lock by anonymous fd, but can't release it forever.
                                                • #1806931: Changes to self-heal logic w.r.t. detecting metadata split-brains
                                                • #1807007: The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
                                                • #1807431: Setting cluster.heal-timeout requires volume restart
                                                • #1807748: bug-1402841.t-mt-dir-scan-race.t fails spuriously
                                                • #1808256: Glusterfs create a flock lock by anonymous fd, but can't release it forever.
                                                • #1809440: [brickmux]: glustershd crashed when rebooting 1/3 nodes at regular intervals
                                                "},{"location":"release-notes/5.2/","title":"Release notes for Gluster 5.2","text":"

                                                This is a bugfix release. The release notes for 5.0 and 5.1 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                NOTE: Next minor release tentative date: Week of 10th January, 2019

                                                "},{"location":"release-notes/5.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.2/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.2/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.1 are listed below.

                                                • #1651525: Issuing a \"heal ... full\" on a disperse volume causes permanent high CPU utilization.
                                                • #1654115: [Geo-rep]: Faulty geo-rep sessions due to link ownership on slave volume
                                                • #1654117: [geo-rep]: Failover / Failback shows fault status in a non-root setup
                                                • #1654236: Provide an option to silence glfsheal logs
                                                • #1654370: Bitrot: Scrub status say file is corrupted even it was just created AND 'path' in the output is broken
                                                • #1655545: gfid heal does not happen when there is no source brick
                                                "},{"location":"release-notes/5.3/","title":"Release notes for Gluster 5.3","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1 and 5.2 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                NOTE: Next minor release tentative date: Week of 10th March, 2019

                                                "},{"location":"release-notes/5.3/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.3/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.3/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.2 are listed below.

                                                • #1623107: FUSE client's memory leak
                                                • #1648642: fails to sync non-ascii (utf8) file and directory names, causes permanently faulty geo-replication state
                                                • #1651323: Tracker bug for all leases related issues
                                                • #1659563: gluster-blockd segfaults because of a null-dereference in shard.so
                                                • #1659676: Memory leak: dict_t leak in rda_opendir
                                                • #1660736: dht_revalidate may not heal attrs on the brick root
                                                • #1660932: Fix tests/bugs/shard/zero-flag.t
                                                • #1662200: NL cache: fix typos
                                                • #1663131: [Ganesha] Ganesha failed on one node while exporting volumes in loop
                                                • #1665803: [ovirt-gluster] Fuse mount crashed while deleting a 1 TB image file from ovirt
                                                "},{"location":"release-notes/5.5/","title":"Release notes for Gluster 5.5","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1, 5.2 and 5.3 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                NOTE: Next minor release tentative date: Week of 10th May, 2019

                                                NOTE: Release 5.4 was never announced as there was a fix which prevented rolling upgrades from working correctly. Hence this release notes contains a skip from 5.3 till 5.5 in terms of issues addressed and also addresses the issue were rolling upgrades were broken.

                                                "},{"location":"release-notes/5.5/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.5/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.5/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.3 are listed below.

                                                • #1684385: [ovirt-gluster] Rolling gluster upgrade from 3.12.5 to 5.3 led to shard on-disk xattrs disappearing
                                                • #1684569: Upgrade from 4.1 and 5 is broken
                                                • #1687249: Error handling in /usr/sbin/gluster-eventsapi produces IndexError: tuple index out of range
                                                • #1687687: [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
                                                • #1649054: glustereventsd does not start on Ubuntu 16.04 LTS
                                                • #1651246: Failed to dispatch handler
                                                • #1665145: Writes on Gluster 5 volumes fail with EIO when \"cluster.consistent-metadata\" is set
                                                • #1669382: [ovirt-gluster] Fuse mount crashed while creating the preallocated image
                                                • #1670307: api: bad GFAPI_4.1.6 block
                                                • #1671217: core: move \"dict is NULL\" logs to DEBUG log level
                                                • #1671556: glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
                                                • #1671611: Unable to delete directories that contain linkto files that point to itself.
                                                • #1672248: quorum count not updated in nfs-server vol file
                                                • #1672314: thin-arbiter: Check with thin-arbiter file before marking new entry change log
                                                • #1673268: Fix timeouts so the tests pass on AWS
                                                • #1678726: Integer Overflow possible in md-cache.c due to data type inconsistency
                                                • #1679968: Upgrade from glusterfs 3.12 to gluster 4/5 broken
                                                "},{"location":"release-notes/5.6/","title":"Release notes for Gluster 5.6","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3 and 5.5 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                NOTE: Next minor release tentative date: Week of 10th June, 2019

                                                NOTE: Release 5.4 was never announced as there was a fix which prevented rolling upgrades from working correctly. Hence this release notes contains a skip from 5.3 till 5.5 in terms of issues addressed and also addresses the issue were rolling upgrades were broken.

                                                "},{"location":"release-notes/5.6/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.6/#major-issues","title":"Major issues","text":"

                                                Several users had issues around increased network usage after upgrading to 5.x release, this issue was tracked against bug#1673058 and is now addressed as a part of this minor release.

                                                "},{"location":"release-notes/5.6/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.5 are listed below.

                                                • #1673058: Network throughput usage increased x5
                                                • #1690952: lots of \"Matching lock not found for unlock xxx\" when using disperse (ec) xlator
                                                • #1694562: gfapi: do not block epoll thread for upcall notifications
                                                • #1694612: glusterd leaking memory when issued gluster vol status all tasks continuosly
                                                • #1695391: GF_LOG_OCCASSIONALLY API doesn't log at first instance
                                                • #1695403: rm -rf fails with \"Directory not empty\"
                                                • #1696147: Multiple shd processes are running on brick_mux environmet
                                                "},{"location":"release-notes/5.8/","title":"Release notes for Gluster 5.8","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, and 5.6 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                NOTE: The 5.7 is dead by release due to #1728988 The packages weren't released. Please use 5.8. Next minor release tentative date: Week of 10th August, 2019

                                                "},{"location":"release-notes/5.8/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.8/#major-issues","title":"Major issues","text":"

                                                A issue that was blocking the build was addressed #1728988

                                                "},{"location":"release-notes/5.8/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.6 are listed below.

                                                • #1717282: ec ignores lock contention notifications for partially acquired locks
                                                • #1629877: GlusterFS can be improved (clone for Gluster-5)
                                                • #1695399: With parallel-readdir enabled, deleting a directory containing stale linkto files fails with \"Directory not empty\"
                                                • #1699500: fix truncate lock to cover the write in tuncate clean
                                                • #1699736: Fops hang when inodelk fails on the first fop
                                                • #1707198: VM stuck in a shutdown because of a pending fuse request

                                                • #1720634: Upcall: Avoid sending upcalls for invalid Inode

                                                • #1720636: Ganesha-gfapi logs are flooded with error messages related to \"gf_uuid_is_null(gfid)) [Invalid argument]\" when lookups are running from multiple clients
                                                • #1721106: Failed to create volume which transport_type is \"tcp,rdma\"
                                                • #1728988: release-5.7 glupy is not getting built during packaging.
                                                "},{"location":"release-notes/5.9/","title":"Release notes for Gluster 5.9","text":"

                                                This is a bugfix release. The release notes for 5.0, 5.1, 5.2, 5.3, 5.5, 5.6 and 5.8 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 5 stable release.

                                                Next minor release tentative date: Week of 10th October, 2019

                                                "},{"location":"release-notes/5.9/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                None

                                                "},{"location":"release-notes/5.9/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/5.9/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5.8 are listed below.

                                                • #1733881: [geo-rep]: gluster command not found while setting up a non-root session
                                                • #1736342: potential deadlock while processing callbacks in gfapi
                                                • #1737716: Unable to create geo-rep session on a non-root setup.
                                                "},{"location":"release-notes/6.0/","title":"Release notes for Gluster 6.0","text":"

                                                This is a major release that includes a range of code improvements and stability fixes along with a few features as noted below.

                                                A selection of the key features and changes are documented in this page. A full list of bugs that have been addressed is included further below.

                                                • Announcements
                                                • Major changes and features
                                                • Major issues
                                                • Bugs addressed in the release
                                                "},{"location":"release-notes/6.0/#announcements","title":"Announcements","text":"
                                                1. Releases that receive maintenance updates post release 6 are, 4.1 and 5 (reference)

                                                2. Release 6 will receive maintenance updates around the 10th of every month for the first 3 months post release (i.e Apr'19, May'19, Jun'19). Post the initial 3 months, it will receive maintenance updates every 2 months till EOL. (reference)

                                                3. A series of features/xlators have been deprecated in release 6 as follows, for upgrade procedures from volumes that use these features to release 6 refer to the release 6 upgrade guide.

                                                This deprecation was announced at the gluster-users list here.

                                                Features deprecated:

                                                • Block device (bd) xlator
                                                • Decompounder feature
                                                • Crypt xlator
                                                • Symlink-cache xlator
                                                • Stripe feature
                                                • Tiering support (tier xlator and changetimerecorder)
                                                "},{"location":"release-notes/6.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/6.0/#highlights","title":"Highlights","text":"
• Several stability fixes addressing:
  • coverity, clang-scan, address sanitizer and valgrind reported issues
  • removal of unused and hence deprecated code and features
• Client side inode garbage collection
  • This release addresses one of the major concerns regarding FUSE mount process memory footprint, by introducing client side inode garbage collection
  • See the standalone section for more details
• Performance improvements
  • --auto-invalidation on FUSE mounts to leverage the kernel page cache more effectively

Features are categorized into the following sections:

                                                • Management
                                                • Standalone
                                                • Developer
                                                "},{"location":"release-notes/6.0/#management","title":"Management","text":"

                                                NOTE: There have been several stability improvements around the brick multiplexing feature

                                                "},{"location":"release-notes/6.0/#glusterd2","title":"GlusterD2","text":"

GlusterD2 (or GD2, in short) was planned as the next generation management service for the Gluster project.

Currently, GD2's main focus is not to replace glusterd, but to serve as a thin management layer when using Gluster with container orchestration systems.

                                                There is no specific update around GD2 provided as a part of this release.

                                                "},{"location":"release-notes/6.0/#standalone","title":"Standalone","text":""},{"location":"release-notes/6.0/#1-client-side-inode-garbage-collection-via-lru-list","title":"1. client-side inode garbage collection via LRU list","text":"

                                                A FUSE mount's inode cache can now be limited to a maximum number, thus reducing the memory footprint of FUSE mount processes.

                                                See the lru-limit option in man 8 mount.glusterfs for details.

NOTE: Setting this to a low value (say, less than 4000) will evict inodes from the FUSE and Gluster caches at a much faster rate and can cause performance degradation. The setting has to be determined based on the available client memory and the required performance.
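
As an illustration, a client mount with a bounded inode cache could be set up as sketched below. This is a minimal sketch; the server, volume name, mount point and limit value are placeholders, and the exact option spelling should be confirmed against man 8 mount.glusterfs for your installed version.

# Hypothetical example: cap the FUSE inode cache at 65536 entries
mount -t glusterfs -o lru-limit=65536 server1:/testvol /mnt/testvol

# Equivalent direct invocation of the FUSE client process
glusterfs --lru-limit=65536 --volfile-server=server1 --volfile-id=testvol /mnt/testvol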

                                                "},{"location":"release-notes/6.0/#2-glusterfind-tool-enhanced-with-a-filter-option","title":"2. Glusterfind tool enhanced with a filter option","text":"

The glusterfind tool has a new option, --type, to be used with the --full option. It restricts the output to files only or directories only, and defaults to listing both if not specified.

Example usage with the pre and query commands is given below.

1. Pre command (reference):

   • Lists both files and directories in OUTFILE: glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE

   • Lists only files in OUTFILE: glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE --type f

   • Lists only directories in OUTFILE: glusterfind pre SESSION_NAME VOLUME_NAME OUTFILE --type d

2. Query command:

   • Lists both files and directories in OUTFILE: glusterfind query VOLUME_NAME --full OUTFILE

   • Lists only files in OUTFILE: glusterfind query VOLUME_NAME --full --type f OUTFILE

   • Lists only directories in OUTFILE: glusterfind query VOLUME_NAME --full --type d OUTFILE

                                                "},{"location":"release-notes/6.0/#3-fuse-mounts-are-enhanced-to-handle-interrupts-to-blocked-lock-requests","title":"3. FUSE mounts are enhanced to handle interrupts to blocked lock requests","text":"

                                                FUSE mounts are enhanced to handle interrupts to blocked locks.

For example, scripts that use the flock utility (man 1 flock) without the -n (nonblock) option against files on a FUSE-based Gluster mount can now be interrupted when the lock is not granted in time, or can bound the wait using the -w option of the same utility.
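
A minimal sketch of the new behaviour, assuming a Gluster volume FUSE-mounted at /mnt/testvol (the mount point and lock file name are placeholders):

# Terminal 1: acquire and hold the lock on a file
flock /mnt/testvol/app.lock -c 'sleep 600'

# Terminal 2: a blocking flock can now be interrupted with Ctrl-C,
# or bounded with a timeout so the script fails instead of hanging
flock -w 10 /mnt/testvol/app.lock -c 'echo "got the lock"'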

                                                "},{"location":"release-notes/6.0/#4-optimizedpass-through-distribute-functionality-for-1-way-distributed-volumes","title":"4. Optimized/pass-through distribute functionality for 1-way distributed volumes","text":"

                                                NOTE: There are no user controllable changes with this feature

                                                The distribute xlator now skips unnecessary checks and operations when the distribute count is one for a volume, resulting in improved performance.

                                                "},{"location":"release-notes/6.0/#5-options-introduced-to-disable-invalidations-of-kernel-page-cache","title":"5. Options introduced to disable invalidations of kernel page cache","text":"

For workloads where multiple FUSE client mounts do not concurrently operate on any files in the volume, it is now possible to retain the kernel page cache for longer by using the following options in conjunction:

• Setting the --auto-invalidation option to "no" on the glusterfs FUSE mount process
• Disabling the volume option performance.global-cache-invalidation

                                                This enables better performance as the data is served from the kernel page cache where possible.
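
A hedged sketch of using both knobs together; the server, volume name and mount point are placeholders, and the exact invocation may differ on your setup (the option names --auto-invalidation and performance.global-cache-invalidation come from the text above):

# Start the FUSE client with auto-invalidation turned off
glusterfs --auto-invalidation=no --volfile-server=server1 --volfile-id=testvol /mnt/testvol

# Disable volume-wide kernel page cache invalidation
gluster volume set testvol performance.global-cache-invalidation off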

                                                "},{"location":"release-notes/6.0/#6-changes-to-gluster-based-smb-share-management","title":"6. Changes to gluster based SMB share management","text":"

Previously, all GlusterFS volumes were exported by default via smb.conf in a Samba-CTDB setup. This included creating a share section for the CTDB lock volume, which is not recommended. Along with a few syntactical errors, these scripts also failed to execute in a non-Samba setup in the absence of the necessary configuration and binary files.

Hereafter, newly created GlusterFS volumes are not exported as SMB shares via Samba unless either the 'user.cifs' or the 'user.smb' volume set option is enabled on the volume. Existing GlusterFS volume share sections in smb.conf remain unchanged.
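
For example, to have a newly created volume exported as an SMB share, one of the named options can be enabled on it. The volume name below is a placeholder, and "enable" is assumed to be the accepted value for these options:

gluster volume set testvol user.smb enable
# or, equivalently, using the older option name
gluster volume set testvol user.cifs enable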

                                                "},{"location":"release-notes/6.0/#7-ctime-feature-is-enabled-by-default","title":"7. ctime feature is enabled by default","text":"

The ctime feature, which maintains (c/m) time consistency across replica and disperse subvolumes, is enabled by default.

Also, with this release, a single option is provided to enable/disable the ctime feature:

# gluster vol set <volname> ctime <on/off>

NOTE: The time information used comes from the clients; hence, clients must keep their clocks synchronized, using NTP or other such means.

                                                Limitations:

• Mounting a gluster volume with time attribute options (noatime, relatime, ...) is not supported with this feature
                                                • This feature does not guarantee consistent time for directories if the hashed sub-volume for the directory is down
                                                • Directory listing is not supported with this feature, and may report inconsistent time information
• Older files created before the upgrade will see their ctime updated when accessed after the upgrade (BUG: 1593542)
                                                "},{"location":"release-notes/6.0/#developer","title":"Developer","text":""},{"location":"release-notes/6.0/#1-gluster-code-can-be-compiled-and-executed-using-tsan","title":"1. Gluster code can be compiled and executed using TSAN","text":"

While configuring the sources for a build, use the extra option --enable-tsan to enable thread-sanitizer-based builds.
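
A minimal sketch of a thread-sanitizer build from a source checkout, assuming the usual autotools flow for GlusterFS:

./autogen.sh
./configure --enable-tsan
make -j"$(nproc)"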

                                                "},{"location":"release-notes/6.0/#2-gfapi-a-class-of-apis-have-been-enhanced-to-return-prepost-gluster_stat-information","title":"2. gfapi: A class of APIs have been enhanced to return pre/post gluster_stat information","text":"

A set of APIs has been enhanced to return pre/post gluster_stat information. Applications using gfapi need to adapt to the newer interfaces to compile against the release-6 APIs. Pre-compiled applications, or applications using the older API SDK, will continue to work as before.

                                                "},{"location":"release-notes/6.0/#major-issues","title":"Major issues","text":"

                                                None

                                                "},{"location":"release-notes/6.0/#bugs-addressed","title":"Bugs addressed","text":"

                                                Bugs addressed since release-5 are listed below.

                                                • #1138841: allow the use of the CIDR format with auth.allow
                                                • #1236272: socket: Use newer system calls that provide better interface/performance on Linux/*BSD when available
                                                • #1243991: \"gluster volume set group \" is not in the help text
                                                • #1285126: RFE: GlusterFS NFS does not implement an all_squash volume setting
                                                • #1343926: port-map: let brick choose its own port
                                                • #1364707: Remove deprecated stripe xlator
                                                • #1427397: script to strace processes consuming high CPU
                                                • #1467614: Gluster read/write performance improvements on NVMe backend
                                                • #1486532: need a script to resolve backtraces
                                                • #1511339: In Replica volume 2*2 when quorum is set, after glusterd restart nfs server is coming up instead of self-heal daemon
                                                • #1535495: Add option -h and --help to gluster cli
                                                • #1535528: Gluster cli show no help message in prompt
                                                • #1560561: systemd service file enhancements
                                                • #1560969: Garbage collect inactive inodes in fuse-bridge
                                                • #1564149: Agree upon a coding standard, and automate check for this in smoke
                                                • #1564890: mount.glusterfs: can't shift that many
                                                • #1575836: logic in S30samba-start.sh hook script needs tweaking
                                                • #1579788: Thin-arbiter: Have the state of volume in memory
                                                • #1582516: libgfapi: glfs init fails on afr volume with ctime feature enabled
                                                • #1590385: Refactor dht lookup code
                                                • #1593538: ctime: Access time is different with in same replica/EC volume
                                                • #1596787: glusterfs rpc-clnt.c: error returned while attempting to connect to host: (null), port 0
                                                • #1598345: gluster get-state command is crashing glusterd process when geo-replication is configured
                                                • #1600145: [geo-rep]: Worker still ACTIVE after killing bricks
                                                • #1605056: [RHHi] Mount hung and not accessible
                                                • #1605077: If a node disconnects during volume delete, it assumes deleted volume as a freshly created volume when it is back online
                                                • #1608512: cluster.server-quorum-type help text is missing possible settings
                                                • #1624006: /var/run/gluster/metrics/ wasn't created automatically
                                                • #1624332: [Thin-arbiter]: Add tests for thin arbiter feature
                                                • #1624724: ctime: Enable ctime feature by default and also improve usability by providing single option to enable
                                                • #1624796: mkdir -p fails with \"No data available\" when root-squash is enabled
                                                • #1625850: tests: fixes to bug-1015990-rep.t
                                                • #1625961: Writes taking very long time leading to system hogging
                                                • #1626313: fix glfs_fini related problems
                                                • #1626610: [USS]: Change gf_log to gf_msg
                                                • #1626994: split-brain observed on parent dir
                                                • #1627610: glusterd crash in regression build
                                                • #1627620: SAS job aborts complaining about file doesn't exist
                                                • #1628194: tests/dht: Additional tests for dht operations
                                                • #1628605: One client hangs when another client loses communication with bricks during intensive write I/O
                                                • #1628664: Update op-version from 4.2 to 5.0
                                                • #1629561: geo-rep: geo-rep config set fails to set rsync-options
                                                • #1630368: Low Random write IOPS in VM workloads
                                                • #1630798: Add performance options to virt profile
                                                • #1630804: libgfapi-python: test_listdir_with_stat and test_scandir failure on release 5 branch
                                                • #1630922: glusterd crashed and core generated at gd_mgmt_v3_unlock_timer_cbk after huge number of volumes were created
                                                • #1631128: rpc marks brick disconnected from glusterd & volume stop transaction gets timed out
                                                • #1631357: glusterfsd keeping fd open in index xlator after stop the volume
                                                • #1631886: Update database profile settings for gluster
                                                • #1632161: [Disperse] : Set others.eager-lock on for ec-1468261.t test to pass
                                                • #1632236: Provide indication at the console or in the logs about the progress being made with changelog processing.
                                                • #1632503: FUSE client segfaults when performance.md-cache-statfs is enabled for a volume
                                                • #1632717: EC crashes when running on non 64-bit architectures
                                                • #1632889: 'df' shows half as much space on volume after upgrade to RHGS 3.4
                                                • #1633926: Script to collect system-stats
                                                • #1634102: MAINTAINERS: Add sunny kumar as a peer for snapshot component
                                                • #1634220: md-cache: some problems of cache virtual glusterfs ACLs for ganesha
                                                • #1635050: [SNAPSHOT]: with brick multiplexing, snapshot restore will make glusterd send wrong volfile
                                                • #1635145: I/O errors observed on the application side after the creation of a 'linkto' file
                                                • #1635480: Correction for glusterd memory leak because use \"gluster volume status volume_name --detail\" continuesly (cli)
                                                • #1635593: glusterd crashed in cleanup_and_exit when glusterd comes up with upgrade mode.
                                                • #1635688: Keep only the valid (maintained/supported) components in the build
                                                • #1635820: Seeing defunt translator and discrepancy in volume info when issued from node which doesn't host bricks in that volume
                                                • #1635863: Gluster peer probe doesn't work for IPv6
                                                • #1636570: Cores due to SIGILL during multiplex regression tests
                                                • #1636631: Issuing a \"heal ... full\" on a disperse volume causes permanent high CPU utilization.
                                                • #1637196: Disperse volume 'df' usage is extremely incorrect after replace-brick.
                                                • #1637249: gfid heal does not happen when there is no source brick
                                                • #1637802: data-self-heal in arbiter volume results in stale locks.
                                                • #1637934: glusterfsd is keeping fd open in index xlator
                                                • #1638453: Gfid mismatch seen on shards when lookup and mknod are in progress at the same time
                                                • #1639599: Improve support-ability of glusterfs
                                                • #1640026: improper checking to avoid identical mounts
                                                • #1640066: [Stress] : Mismatching iatt in glustershd logs during MTSH and continous IO from Ganesha mounts
                                                • #1640165: io-stats: garbage characters in the filenames generated
                                                • #1640489: Invalid memory read after freed in dht_rmdir_readdirp_cbk
                                                • #1640495: [GSS] Fix log level issue with brick mux
                                                • #1640581: [AFR] : Start crawling indices and healing only if both data bricks are UP in replica 2 (thin-arbiter)
                                                • #1641344: Spurious failures in bug-1637802-arbiter-stale-data-heal-lock.t
                                                • #1642448: EC volume getting created without any redundant brick
                                                • #1642597: tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t failing
                                                • #1642800: socket: log voluntary socket close/shutdown and EOF on socket at INFO log-level
                                                • #1642807: remove 'tier' translator from build and code
                                                • #1642810: remove glupy from code and build
                                                • #1642850: glusterd: raise default transport.listen-backlog to 1024
                                                • #1642865: geo-rep: geo-replication gets stuck after file rename and gfid conflict
                                                • #1643349: [OpenSSL] : auth.ssl-allow has no option description.
                                                • #1643402: [Geo-Replication] Geo-rep faulty sesion because of the directories are not synced to slave.
                                                • #1643519: Provide an option to silence glfsheal logs
                                                • #1643929: geo-rep: gluster-mountbroker status crashes
                                                • #1643932: geo-rep: On gluster command failure on slave, worker crashes with python3
                                                • #1643935: cliutils: geo-rep cliutils' usage of Popen is not python3 compatible
                                                • #1644129: Excessive logging in posix_update_utime_in_mdata
                                                • #1644164: Use GF_ATOMIC ops to update inode->nlookup
                                                • #1644629: [rpcsvc] Single request Queue for all event threads is a performance bottleneck
                                                • #1644755: CVE-2018-14651 glusterfs: glusterfs server exploitable via symlinks to relative paths [fedora-all]
                                                • #1644756: CVE-2018-14653 glusterfs: Heap-based buffer overflow via "gf_getspec_req" RPC message [fedora-all]
                                                • #1644757: CVE-2018-14659 glusterfs: Unlimited file creation via "GF_XATTR_IOSTATS_DUMP_KEY" xattr allows for denial of service [fedora-all]
                                                • #1644758: CVE-2018-14660 glusterfs: Repeat use of "GF_META_LOCK_KEY" xattr allows for memory exhaustion [fedora-all]
                                                • #1644760: CVE-2018-14654 glusterfs: "features/index" translator can create arbitrary, empty files [fedora-all]
                                                • #1644763: CVE-2018-14661 glusterfs: features/locks translator passes an user-controlled string to snprintf without a proper format string resulting in a denial of service [fedora-all]
                                                • #1645986: tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t failing in distributed regression
                                                • #1646104: [Geo-rep]: Faulty geo-rep sessions due to link ownership on slave volume
                                                • #1646728: [snapview-server]:forget glfs handles during inode forget
                                                • #1646869: gNFS crashed when processing "gluster v status [vol] nfs clients"
                                                • #1646892: Portmap entries showing stale brick entries when bricks are down
                                                • #1647029: can't enable shared-storage
                                                • #1647074: when peer detach is issued, throw a warning to remount volumes using other cluster IPs before proceeding
                                                • #1647651: gfapi: fix bad dict setting of lease-id
                                                • #1648237: Bumping up of op-version times out on a scaled system with ~1200 volumes
                                                • #1648298: dht_revalidate may not heal attrs on the brick root
                                                • #1648687: Incorrect usage of local->fd in afr_open_ftruncate_cbk
                                                • #1648768: Tracker bug for all leases related issues
                                                • #1649709: profile info doesn't work when decompounder xlator is not in graph
                                                • #1650115: glusterd requests are timing out in a brick multiplex setup
                                                • #1650389: rpc: log flooding with ENODATA errors
                                                • #1650403: Memory leaks observed in brick-multiplex scenario on volume start/stop loop
                                                • #1650893: fails to sync non-ascii (utf8) file and directory names, causes permanently faulty geo-replication state
                                                • #1651059: [OpenSSL] : Retrieving the value of "client.ssl" option,before SSL is set up, fails .
                                                • #1651165: Race in per-thread mem-pool when a thread is terminated
                                                • #1651431: Resolve memory leak at the time of graph init
                                                • #1651439: gluster-NFS crash while expanding volume
                                                • #1651463: glusterd can't regenerate volfiles in container storage upgrade workflow
                                                • #1651498: [geo-rep]: Failover / Failback shows fault status in a non-root setup
                                                • #1651584: [geo-rep]: validate the config checkpoint date and fail if it is not is exact format hh:mm:ss
                                                • #1652118: default cluster.max-bricks-per-process to 250
                                                • #1652430: glusterd fails to start, when glusterd is restarted in a loop for every 45 seconds while volume creation is in-progress
                                                • #1652852: "gluster volume get" doesn't show real default value for server.tcp-user-timeout
                                                • #1652887: Geo-rep help looks to have a typo.
                                                • #1652911: Add no-verify and ssh-port n options for create command in man page
                                                • #1653277: bump up default value of server.event-threads
                                                • #1653359: Self-heal:Improve heal performance
                                                • #1653565: tests/geo-rep: Add arbiter volume test case
                                                • #1654138: Optimize for virt store fails with distribute volume type
                                                • #1654181: glusterd segmentation fault: glusterd_op_ac_brick_op_failed (event=0x7f44e0e63f40, ctx=0x0) at glusterd-op-sm.c:5606
                                                • #1654187: [geo-rep]: RFE - Make slave volume read-only while setting up geo-rep (by default)
                                                • #1654270: glusterd crashed with seg fault possibly during node reboot while volume creates and deletes were happening
                                                • #1654521: io-stats outputs json numbers as strings
                                                • #1654805: Bitrot: Scrub status say file is corrupted even it was just created AND 'path' in the output is broken
                                                • #1654917: cleanup resources in server_init in case of failure
                                                • #1655050: automatic split resolution with size as policy should not work on a directory which is in metadata splitbrain
                                                • #1655052: Automatic Splitbrain with size as policy must not resolve splitbrains when both the copies are of same size
                                                • #1655827: [Glusterd]: Glusterd crash while expanding volumes using heketi
                                                • #1655854: Converting distribute to replica-3/arbiter volume fails
                                                • #1656100: configure.ac does not enforce automake --foreign
                                                • #1656264: Fix tests/bugs/shard/zero-flag.t
                                                • #1656348: Commit c9bde3021202f1d5c5a2d19ac05a510fc1f788ac causes ls slowdown
                                                • #1656517: [GSS] Gluster client logs filling with 0-glusterfs-socket: invalid port messages
                                                • #1656682: brick memory consumed by volume is not getting released even after delete
                                                • #1656771: [Samba-Enhancement] Need for a single group command for setting up volume options for samba
                                                • #1656951: cluster.max-bricks-per-process 250 not working as expected
                                                • #1657607: Convert nr_files to gf_atomic in posix_private structure
                                                • #1657744: quorum count not updated in nfs-server vol file
                                                • #1657783: Rename of a file leading to stale reads
                                                • #1658045: Resolve memory leak in mgmt_pmap_signout_cbk
                                                • #1658116: python2 to python3 compatibilty issues
                                                • #1659327: 43% regression in small-file sequential read performance
                                                • #1659432: Memory leak: dict_t leak in rda_opendir
                                                • #1659708: Optimize by not stopping (restart) selfheal deamon (shd) when a volume is stopped unless it is the last volume
                                                • #1659857: change max-port value in glusterd vol file to 60999
                                                • #1659868: glusterd : features.selinux was missing in glusterd-volume-set file
                                                • #1659869: improvements to io-cache
                                                • #1659971: Setting slave volume read-only option by default results in failure
                                                • #1660577: [Ganesha] Ganesha failed on one node while exporting volumes in loop
                                                • #1660701: Use adaptive mutex in rpcsvc_program_register to improve performance
                                                • #1661214: Brick is getting OOM for tests/bugs/core/bug-1432542-mpx-restart-crash.t
                                                • #1662089: NL cache: fix typos
                                                • #1662264: thin-arbiter: Check with thin-arbiter file before marking new entry change log
                                                • #1662368: [ovirt-gluster] Fuse mount crashed while deleting a 1 TB image file from ovirt
                                                • #1662679: Log connection_id in statedump for posix-locks as well for better debugging experience
                                                • #1662906: Longevity: glusterfsd(brick process) crashed when we do volume creates and deletes
                                                • #1663077: memory leak in mgmt handshake
                                                • #1663102: Change default value for client side heal to off for replicate volumes
                                                • #1663223: profile info command is not displaying information of bricks which are hosted on peers
                                                • #1663243: rebalance status does not display localhost statistics when op-version is not bumped up
                                                • #1664122: do not send bit-rot virtual xattrs in lookup response
                                                • #1664124: Improve information dumped from io-threads in statedump
                                                • #1664551: Wrong description of localtime-logging in manpages
                                                • #1664647: dht: Add NULL check for stbuf in dht_rmdir_lookup_cbk
                                                • #1664934: glusterfs-fuse client not benefiting from page cache on read after write
                                                • #1665038: glusterd crashed while running "gluster get-state glusterd odir /get-state"
                                                • #1665332: Wrong offset is used in offset for zerofill fop
                                                • #1665358: allow regression to not run tests with nfs, if nfs is disabled.
                                                • #1665363: Fix incorrect definition in index-mem-types.h
                                                • #1665656: testcaes glusterd/add-brick-and-validate-replicated-volume-options.t is crash while brick_mux is enable
                                                • #1665826: [geo-rep]: Directory renames not synced to slave in Hybrid Crawl
                                                • #1666143: Several fixes on socket pollin and pollout return value
                                                • #1666833: move few recurring logs to DEBUG level.
                                                • #1667779: glusterd leaks about 1GB memory per day on single machine of storage pool
                                                • #1667804: Unable to delete directories that contain linkto files that point to itself.
                                                • #1667905: dict_leak in __glusterd_handle_cli_uuid_get function
                                                • #1668190: Block hosting volume deletion via heketi-cli failed with error \"target is busy\" but deleted from gluster backend
                                                • #1668268: Unable to mount gluster volume
                                                • #1669077: [ovirt-gluster] Fuse mount crashed while creating the preallocated image
                                                • #1669937: Rebalance : While rebalance is in progress , SGID and sticky bit which is set on the files while file migration is in progress is seen on the mount point
                                                • #1670031: performance regression seen with smallfile workload tests
                                                • #1670253: Writes on Gluster 5 volumes fail with EIO when "cluster.consistent-metadata" is set
                                                • #1670259: New GFID file recreated in a replica set after a GFID mismatch resolution
                                                • #1671213: core: move "dict is NULL" logs to DEBUG log level
                                                • #1671637: geo-rep: Issue with configparser import
                                                • #1672205: 'gluster get-state' command fails if volume brick doesn't exist.
                                                • #1672818: GlusterFS 6.0 tracker
                                                • #1673267: Fix timeouts so the tests pass on AWS
                                                • #1673972: insufficient logging in glusterd_resolve_all_bricks
                                                • #1674364: glusterfs-fuse client not benefiting from page cache on read after write
                                                • #1676429: distribute: Perf regression in mkdir path
                                                • #1677260: rm -rf fails with "Directory not empty"
                                                • #1678570: glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
                                                • #1679004: With parallel-readdir enabled, deleting a directory containing stale linkto files fails with "Directory not empty"
                                                • #1679275: dht: fix double extra unref of inode at heal path
                                                • #1679965: Upgrade from glusterfs 3.12 to gluster 4/5 broken
                                                • #1679998: GlusterFS can be improved
                                                • #1680020: Integer Overflow possible in md-cache.c due to data type inconsistency
                                                • #1680585: remove glupy from code and build
                                                • #1680586: Building RPM packages with _for_fedora_koji_builds enabled fails on el6
                                                • #1683008: glustereventsd does not start on Ubuntu 16.04 LTS
                                                • #1683506: remove experimental xlators informations from glusterd-volume-set.c
                                                • #1683716: glusterfind: revert shebangs to #!/usr/bin/python3
                                                • #1683880: Multiple shd processes are running on brick_mux environmet
                                                • #1683900: Failed to dispatch handler
                                                • #1684029: upgrade from 3.12, 4.1 and 5 to 6 broken
                                                • #1684777: gNFS crashed when processing "gluster v profile [vol] info nfs"
                                                • #1685771: glusterd memory usage grows at 98 MB/h while being monitored by RHGSWA
                                                • #1686364: [ovirt-gluster] Rolling gluster upgrade from 3.12.5 to 5.3 led to shard on-disk xattrs disappearing
                                                • #1686399: listing a file while writing to it causes deadlock
                                                • #1686875: packaging: rdma on s390x, unnecessary ldconfig scriptlets
                                                • #1687248: Error handling in /usr/sbin/gluster-eventsapi produces IndexError: tuple index out of range
                                                • #1687672: [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
                                                • #1688218: Brick process has coredumped, when starting glusterd
                                                • "},{"location":"release-notes/6.1/","title":"Release notes for Gluster 6.1","text":"

                                                  This is a bugfix release. The release notes for 6.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Next minor release tentative date: Week of 10th May, 2019

                                                  "},{"location":"release-notes/6.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":""},{"location":"release-notes/6.1/#major-issues","title":"Major issues","text":"

                                                  None

                                                  "},{"location":"release-notes/6.1/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.0 are listed below.

                                                  • #1679904: client log flooding with intentional socket shutdown message when a brick is down
                                                  • #1690950: lots of "Matching lock not found for unlock xxx" when using disperse (ec) xlator
                                                  • #1691187: fix Coverity CID 1399758
                                                  • #1692101: Network throughput usage increased x5
                                                  • #1692957: rpclib: slow floating point math and libm
                                                  • #1693155: Excessive AFR messages from gluster showing in RHGSWA.
                                                  • #1693223: [Disperse] : Client side heal is not removing dirty flag for some of the files.
                                                  • #1693992: Thin-arbiter minor fixes
                                                  • #1694002: Geo-re: Geo replication failing in "cannot allocate memory"
                                                  • #1694561: gfapi: do not block epoll thread for upcall notifications
                                                  • #1694610: glusterd leaking memory when issued gluster vol status all tasks continuosly
                                                  • #1695436: geo-rep session creation fails with IPV6
                                                  • #1695445: ssh-port config set is failing
                                                  • #1697764: [cluster/ec] : Fix handling of heal info cases without locks
                                                  • #1698471: ctime feature breaks old client to connect to new server
                                                  • #1699198: Glusterfs create a flock lock by anonymous fd, but can't release it forever.
                                                  • #1699319: Thin-Arbiter SHD minor fixes
                                                  • #1699499: fix truncate lock to cover the write in tuncate clean
                                                  • #1699703: ctime: Creation of tar file on gluster mount throws warning "file changed as we read it"
                                                  • #1699713: glusterfs build is failing on rhel-6
                                                  • #1699714: Brick is not able to detach successfully in brick_mux environment
                                                  • #1699715: Log level changes do not take effect until the process is restarted
                                                  • #1699731: Fops hang when inodelk fails on the first fop
                                                  "},{"location":"release-notes/6.10/","title":"Release notes for Gluster 6.10","text":"

                                                  This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8 and 6.9 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: This is the last minor release of GlusterFS 6. Users are highly encouraged to upgrade to newer releases of GlusterFS.

                                                  "},{"location":"release-notes/6.10/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.9 are listed below.

                                                  • #1740494: Fencing: Added the tcmu-runner ALUA feature support but after one of node is rebooted the glfs_file_lock() get stucked
                                                  • #1000 [bug:1193929] GlusterFS can be improved
                                                  • #1016 [bug:1795609] glusterfsd memory leak observed after enable tls
                                                  • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                                                  • #1127 Mount crash during background shard cleanup
                                                  • #1179 gnfs split brain when 1 server in 3x1 down (high load)
                                                  • #1220 cluster/ec: return correct error code and log the message in ...
                                                  • #1223 Failure of tests/basic/gfapi/gfapi-copy-file-range.t
                                                  • #1254 Prioritize ENOSPC over other lesser priority errors
                                                  • #1303 Failures in rebalance due to [Input/output error]
                                                  • #1307 Spurious failure of tests/bug-844688.t: test bug-844688.t on ...
                                                  • #1349 Issue for backporting https://review.gluster.org//c/glusterf...
                                                  • #1362 [bug: 1687326]: Revoke access from nodes using Certificate Re...
                                                  "},{"location":"release-notes/6.2/","title":"Release notes for Gluster 6.2","text":"

                                                  This is a bugfix release. The release notes for 6.0 and 6.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Next minor release tentative date: Week of 10th June, 2019

                                                  "},{"location":"release-notes/6.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                  None

                                                  "},{"location":"release-notes/6.2/#major-issues","title":"Major issues","text":"

                                                  None

                                                  "},{"location":"release-notes/6.2/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.1 are listed below.

                                                  • #1699917: I/O error on writes to a disperse volume when replace-brick is executed
                                                  • #1701818: Syntactical errors in hook scripts for managing SELinux context on bricks #2 (S10selinux-label-brick.sh + S10selinux-del-fcontext.sh)
                                                  • #1702271: Memory accounting information is not always accurate
                                                  • #1702734: ctime: Logs are flooded with "posix set mdata failed, No ctime" error during open
                                                  • #1703759: statedump is not capturing info related to glusterd
                                                  • #1707393: Refactor dht lookup code
                                                  • #1709130: thin-arbiter lock release fixes
                                                  • #1709143: [Thin-arbiter] : send correct error code in case of failure
                                                  • #1709660: Glusterfsd crashing in ec-inode-write.c, in GF_ASSERT
                                                  • #1709685: Geo-rep: Value of pending entry operations in detail status output is going up after each synchronization.
                                                  • #1709734: Geo-rep: Data inconsistency while syncing heavy renames with constant destination name
                                                  • #1709737: geo-rep: Always uses rsync even with use_tarssh set to true
                                                  • #1709738: geo-rep: Sync hangs with tarssh as sync-engine
                                                  • #1712220: tests/geo-rep: arequal checksum comparison always succeeds
                                                  • #1712223: geo-rep: With heavy rename workload geo-rep log if flooded
                                                  "},{"location":"release-notes/6.3/","title":"Release notes for Gluster 6.3","text":"

                                                  This is a bugfix release. The release notes for 6.0, 6.1 and 6.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Next minor release tentative date: Week of 10th July, 2019

                                                  "},{"location":"release-notes/6.3/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                  None

                                                  "},{"location":"release-notes/6.3/#major-issues","title":"Major issues","text":"

                                                  None

                                                  "},{"location":"release-notes/6.3/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.2 are listed below.

                                                  • #1714172: ec ignores lock contention notifications for partially acquired locks
                                                  • #1715012: Failure when glusterd is configured to bind specific IPv6 address. If bind-address is IPv6, *addr_len will be non-zero and it goes to ret = -1 branch, which will cause listen failure eventually
                                                  "},{"location":"release-notes/6.4/","title":"Release notes for Gluster 6.4","text":"

                                                  This is a bugfix release. The release notes for 6.0, 6.1, 6.2 and 6.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Next minor release tentative date: Week of 10th August, 2019

                                                  "},{"location":"release-notes/6.4/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                  None

                                                  "},{"location":"release-notes/6.4/#major-issues","title":"Major issues","text":"

                                                  None

                                                  "},{"location":"release-notes/6.4/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.3 are listed below.

                                                  • #1679998: GlusterFS can be improved
                                                  • #1683815: Memory leak when peer detach fails
                                                  • #1716812: Failed to create volume which transport_type is "tcp,rdma"
                                                  • #1716871: Image size as reported from the fuse mount is incorrect
                                                  • #1718227: SELinux context labels are missing for newly added bricks using add-brick command
                                                  • #1720633: Upcall: Avoid sending upcalls for invalid Inode
                                                  • #1720635: Ganesha-gfapi logs are flooded with error messages related to "gf_uuid_is_null(gfid)) [Invalid argument]" when lookups are running from multiple clients
                                                  • #1720993: tests/features/subdir-mount.t is failing for brick_mux regrssion
                                                  • #1721105: Failed to create volume which transport_type is "tcp,rdma"
                                                  • #1721783: ctime changes: tar still complains file changed as we read it if uss is enabled
                                                  • #1722805: Healing not proceeding during in-service upgrade on a disperse volume
                                                  • #1723658: [In-service] Post upgrade glusterd is crashing with a backtrace on the upgraded node while issuing gluster volume status from non-upgraded nodes
                                                  • #1723659: ESTALE change in fuse breaks get_real_filename implementation
                                                  • #1724210: Incorrect power of two calculation in mem_pool_get_fn
                                                  • #1724558: [Ganesha]: truncate operation not updating the ctime
                                                  • #1726294: DHT: severe memory leak in dht rename
                                                  • #1726327: tests/features/subdir-mount.t is failing for brick_mux regrssion
                                                  • #1727984: User serviceable snapshots (USS) are not accessible after changing transport.socket.bind-address of glusterd
                                                  • #1728126: [In-service] Post upgrade glusterd is crashing with a backtrace on the upgraded node while issuing gluster volume status from non-upgraded nodes
                                                  • #1729952: Deadlock when generating statedumps
                                                  "},{"location":"release-notes/6.5/","title":"Release notes for Gluster 6.5","text":"

                                                  This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3 and 6.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Next minor release tentative date: Week of 30th October, 2019

                                                  "},{"location":"release-notes/6.5/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                  None

                                                  "},{"location":"release-notes/6.5/#major-issues","title":"Major issues","text":"

                                                  None

                                                  "},{"location":"release-notes/6.5/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.4 are listed below.

                                                  • #1716848: DHT: directory permissions are wiped out
                                                  • #1730545: gluster v geo-rep status command timing out
                                                  • #1731509: snapd crashes sometimes
                                                  • #1736341: potential deadlock while processing callbacks in gfapi
                                                  • #1733880: [geo-rep]: gluster command not found while setting up a non-root session
                                                  • #1733885: ctime: Upgrade/Enabling ctime feature wrongly updates older files with latest {a|m|c}time
                                                  • #1737712: Unable to create geo-rep session on a non-root setup.
                                                  • #1737745: ctime: When healing ctime xattr for legacy files, if multiple clients access and modify the same file, the ctime might be updated incorrectly.
                                                  • #1737746: ctime: nfs client gets bad ctime for copied file which is on glusterfs disperse volume with ctime on
                                                  "},{"location":"release-notes/6.6/","title":"Release notes for Gluster 6.6","text":"

                                                  This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4 and 6.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Next minor release tentative date: Week of 30th December, 2019

                                                  "},{"location":"release-notes/6.6/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                  None

                                                  "},{"location":"release-notes/6.6/#major-issues","title":"Major issues","text":"

                                                  None

                                                  "},{"location":"release-notes/6.6/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.5 are listed below.

                                                  • #1726175: CentOs 6 GlusterFS client creates files with time 01/01/1970
                                                  • #1737141: read() returns more than file size when using direct I/O
                                                  • #1739320: The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
                                                  • #1739335: Multiple disconnect events being propagated for the same child
                                                  • #1739451: An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
                                                  • #1740525: event: rename eventXXX with gf prefixed to avoid crash when apps linked libevent at the same time
                                                  • #1741044: atime/mtime is not restored after healing for entry self heals
                                                  • #1741402: READDIRP incorrectly updates posix-acl inode ctx
                                                  • #1743219: glusterd start is failed and throwing an error Address already in use
                                                  • #1743782: Windows client fails to copy large file to GlusterFS volume share with fruit and streams_xattr VFS modules via Samba
                                                  • #1743988: Setting cluster.heal-timeout requires volume restart
                                                  • #1745421: ./tests/bugs/glusterd/bug-1595320.t is failing
                                                  • #1746118: capture stat failure error while setting the gfid
                                                  • #1746138: ctime: If atime is updated via utimensat syscall ctime is not getting updated
                                                  • #1749157: bug-1402841.t-mt-dir-scan-race.t fails spuriously
                                                  • #1749307: Failures in remove-brick due to [Input/output error] errors
                                                  • #1750228: [geo-rep]: Non-root - Unable to set up mountbroker root directory and group
                                                  • #1751557: syncop: Bail out if frame creation fails
                                                  • #1752413: ctime: Cannot see the "trusted.glusterfs.mdata" xattr for directory on a new brick after rebalance
                                                  • #1753561: Custom xattrs are not healed on newly added brick
                                                  • #1753571: interrupts leak memory
                                                  • #1755679: Segmentation fault occurs while truncate file
                                                  • #1755785: git clone fails on gluster volumes exported via nfs-ganesha
                                                  • #1760361: packaging: remove leftover bd cruft in rpm .spec.in
                                                  • #1760706: glustershd can not decide heald_sinks, and skip repair, so some entries lingering in volume heal info
                                                  • #1760792: afr: support split-brain CLI for replica 3
                                                  • #1761907: Rebalance causing IO Error - File descriptor in bad state
                                                  • #1763028: [geo-rep] sync_method showing rsync instead of tarssh post in-service upgrade
                                                  • #1764171: [Upgrade] Config files are not upgraded to new version
                                                  • #1764172: geo-replication sessions going faulty
                                                  • #1764174: geo-rep syncing significantly behind and also only one of the directories are synced with tracebacks seen
                                                  • #1764176: geo-rep: Changelog archive file format is incorrect
                                                  • #1764178: tests/geo-rep: Add test case to validate non-root geo-replication setup
                                                  • #1764183: [GSS] geo-rep entering into faulty state with OSError: [Errno 13] Permission denied
                                                  • #1765433: test: fix non-root geo-rep test case
                                                  "},{"location":"release-notes/6.7/","title":"Release notes for Gluster 6.7","text":"

                                                  This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4, 6.5 and 6.6 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Tentative date for next minor release: Week of 29th February, 2020

                                                  "},{"location":"release-notes/6.7/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                  None

                                                  "},{"location":"release-notes/6.7/#major-issues","title":"Major issues","text":"

                                                  We have come across an issue where a client performing I/O crashes while a rebalance is running.

                                                  https://bugzilla.redhat.com/show_bug.cgi?id=1786983

                                                  Workaround: This issue can be avoided by stopping I/O on the clients while the rebalance is running; see the example after the fix note below.

                                                  Fix: The fix is ready and will be part of the next release, 6.8, which is expected around the 29th of February. https://review.gluster.org/#/c/glusterfs/+/23938/
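
                                                  As a minimal illustration of the workaround (the volume name below is a placeholder), the rebalance status can be checked so that I/O is resumed only after the rebalance has finished:

                                                      gluster volume rebalance <VOLNAME> status

                                                  Once the status reports completed for all nodes, it should be safe to resume I/O on the clients.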

                                                  "},{"location":"release-notes/6.7/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.6 are listed below.

                                                  • #1739446: [Disperse] : Client side heal is not removing dirty flag for some of the files.
                                                  • #1739449: Disperse volume : data corruption with ftruncate data in 4+2 config
                                                  • #1739450: Open fd heal should filter O_APPEND/O_EXCL
                                                  • #1749625: [GlusterFS 6.1] GlusterFS brick process crash
                                                  • #1766425: cgroup control-cpu-load.sh script not working
                                                  • #1768726: Memory leak in glusterfsd process
                                                  • #1770100: [geo-rep]: Geo-rep goes FAULTY with OSError
                                                  • #1771842: [CENTOS 6] Geo-replication session not starting after creation
                                                  • #1778182: glusterfsd crashed with "'MemoryError' Cannot access memory at address"
                                                  • #1782495: GlusterFS brick process crash
                                                  • #1784796: tests/00-geo-rep/00-georep-verify-non-root-setup.t fail on freshly installed builder
                                                  "},{"location":"release-notes/6.8/","title":"Release notes for Gluster 6.8","text":"

                                                  This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6 and 6.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Tentative date for next minor release: Week of 30th April, 2020

                                                  "},{"location":"release-notes/6.8/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                  None

                                                  "},{"location":"release-notes/6.8/#major-issues","title":"Major issues","text":"

                                                  None

                                                  "},{"location":"release-notes/6.8/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.7 are listed below.

                                                  • #1786754: Functionality to enable log rotation for user serviceable snapshot's logs.
                                                  • #1786983: Rebalance is causing glusterfs crash on client node
                                                  • #1789337: glusterfs process memory leak in ior test
                                                  • #1790445: glusterfind pre output file is empty
                                                  • #1790449: S57glusterfind-delete-post.py not python3 ready (does not decode bytestring)
                                                  • #1790850: Remove extra argument
                                                  • #1792857: Memory corruption when sending events to an IPv6 host
                                                  • #1793096: gf_event doesn't work for glfsheal process
                                                  • #1794020: Mounts fails after reboot of 1/3 gluster nodes
                                                  • #1797985: Brick logs inundated with [2019-04-27 22:14:53.378047] I [dict.c:541:dict_get] (-->/usr/lib64/glusterfs/6.0/xlator/features/worm.so(+0x7241) [0x7fe857bb3241] -->/usr/lib64/glusterfs/6.0/xlator/features/locks.so(+0x1c219) [0x7fe857dda219] [Invalid argumen
                                                  • #1804546: [Thin-arbiter] : Wait for connection with TA node before sending lookup/create of ta-replica id file
                                                  • #1804594: Heal pending on volume, even after all the bricks are up
                                                  • #1805097: Changes to self-heal logic w.r.t. detecting metadata split-brains
                                                  • #1805671: Memory corruption when glfs_init() is called after glfs_fini()
                                                  • #1806836: [EC] shd crashed while heal failed due to out of memory error.
                                                  • #1806838: Disperse volume : Ganesha crash with IO in 4+2 config when one glusterfsd restart every 600s
                                                  • #1807786: seeing error message in glustershd.log on volume start(or may be as part of shd graph regeneration) inet_pton failed with return code 0 [Invalid argument]
                                                  • #1807793: glusterfs-libs: usage of inet_addr() may impact IPv6
                                                  "},{"location":"release-notes/6.9/","title":"Release notes for Gluster 6.9","text":"

                                                  This is a bugfix release. The release notes for 6.0, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7 and 6.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 6 stable release.

                                                  NOTE: Tentative date for next minor release: Week of 30th June, 2020

                                                  "},{"location":"release-notes/6.9/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                  None

                                                  "},{"location":"release-notes/6.9/#major-issues","title":"Major issues","text":"

                                                  None

                                                  "},{"location":"release-notes/6.9/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6.8 are listed below.

                                                  • #832: Permission Denied in logs.
                                                  • #1152: Spurious failure of tests/bugs/protocol/bug-1433815-auth-allow.t
                                                  • #1140: getfattr returns ENOATTR for system.posix_acl_access on disperse type volumes
                                                  • #884: [bug:1808688] Data corruption with asynchronous writes (please try to reproduce!)
                                                  • #1134: snap_scheduler.py init failing with "TypeError: Can't mix strings and bytes in path components"
                                                  • #1067: [bug:1661889] Metadata heal picks different brick each time as source if there are no pending xattrs.
                                                  • #1028: [bug:1810934] Segfault in FUSE process, potential use after free
                                                  • #1146: gfapi/Upcall: Potential deadlock in synctask threads processing upcall notifications
                                                  • #1808966: Set volume option when one of the node is powered off, After powering the node brick processes are offline
                                                  • #1809439: [brickmux]: glustershd crashed when rebooting 1/3 nodes at regular intervals
                                                  "},{"location":"release-notes/7.0/","title":"Release notes for Gluster 7.0","text":"

                                                  This is a major release that includes a range of code improvements and stability fixes along with a few features as noted below.

                                                  A selection of the key features and changes is documented on this page. A full list of bugs that have been addressed is included further below.

                                                  • Announcements
                                                  • Major changes and features
                                                  • Major issues
                                                  • Bugs addressed in the release
                                                  "},{"location":"release-notes/7.0/#announcements","title":"Announcements","text":"
                                                  1. Releases that receive maintenance updates post release 7 are 5, 6 and 7 (reference).

                                                  2. Release 7 will receive maintenance updates around the 10th of every month for the first 3 months post release (i.e. Dec'19, Jan'20, Feb'20). After the initial 3 months, it will receive maintenance updates every 2 months till EOL.

                                                  "},{"location":"release-notes/7.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/7.0/#highlights","title":"Highlights","text":"
                                                  • Several stability fixes addressing:
                                                    • coverity, clang-scan, address sanitizer and valgrind reported issues
                                                    • removal of unused and hence, deprecated code and features
                                                  • Performance Improvements

                                                  Features

                                                  "},{"location":"release-notes/7.0/#1-rpcbind-not-required-in-glusterdservice-when-gnfs-isnt-built","title":"1. Rpcbind not required in glusterd.service when gnfs isn't built.","text":""},{"location":"release-notes/7.0/#2-latency-based-read-child-to-improve-read-workload-latency-in-a-cluster-especially-in-a-cloud-setup-also-provides-a-load-balancing-with-the-outstanding-pending-request","title":"2. Latency based read child to improve read workload latency in a cluster, especially in a cloud setup. Also provides a load balancing with the outstanding pending request.","text":""},{"location":"release-notes/7.0/#3-glusterfind-integrate-with-gfid2path-to-improve-performance","title":"3. Glusterfind: integrate with gfid2path, to improve performance.","text":""},{"location":"release-notes/7.0/#4-issue-532-work-towards-implementing-global-thread-pooling-has-started","title":"4. Issue #532: Work towards implementing global thread pooling has started","text":""},{"location":"release-notes/7.0/#5-this-release-includes-extra-coverage-for-glfs-public-apis-in-our-regression-tests-so-we-dont-break-anything","title":"5. This release includes extra coverage for glfs public APIs in our regression tests, so we don't break anything.","text":""},{"location":"release-notes/7.0/#6-thin-arbiter-integration-with-gd1","title":"6. Thin-arbiter integration with GD1","text":""},{"location":"release-notes/7.0/#major-issues","title":"Major issues","text":"
                                                  • #1771308:Unable to build the gluster packages for centos-6
                                                  "},{"location":"release-notes/7.0/#note","title":"Note","text":"

                                                  Any new volumes created with the release will have the fips-mode-rchecksum volume option set to on by default.

                                                  If a client older than glusterfs-4.x (i.e. a 3.x client) accesses a volume which has the fips-mode-rchecksum volume option enabled, it can cause erroneous checksum computation/unwanted behaviour during afr self-heal. This option should be enabled only when all clients are >=4.x. So if you are using these older clients, please explicitly turn this option off, as shown below.
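
                                                  As a minimal sketch of the above (the volume name is a placeholder), the current value can be checked and the option explicitly disabled on volumes that still serve 3.x clients:

                                                      gluster volume get <VOLNAME> fips-mode-rchecksum
                                                      gluster volume set <VOLNAME> fips-mode-rchecksum off

                                                  Re-enable the option only after all clients have been upgraded to >=4.x.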

                                                  "},{"location":"release-notes/7.0/#bugs-addressed","title":"Bugs addressed","text":"

                                                  Bugs addressed since release-6 are listed below.

                                                  • #789278: Issues reported by Coverity static analysis tool
                                                  • #1098991: Dist-geo-rep: Invalid slave url (::: three or more colons) error out with unclear error message.
                                                  • #1193929: GlusterFS can be improved
                                                  • #1241494: [Backup]: Glusterfind CLI commands need to verify the accepted names for session/volume, before failing with error(s)
                                                  • #1512093: Value of pending entry operations in detail status output is going up after each synchronization.
                                                  • #1535511: Gluster CLI shouldn't stop if log file couldn't be opened
                                                  • #1542072: Syntactical errors in hook scripts for managing SELinux context on bricks #2 (S10selinux-label-brick.sh + S10selinux-del-fcontext.sh)
                                                  • #1573226: eventsapi: ABRT report for package glusterfs has reached 10 occurrences
                                                  • #1580315: gluster volume status inode getting timed out after 30 minutes with no output/error
                                                  • #1590385: Refactor dht lookup code
                                                  • #1593224: [Disperse] : Client side heal is not removing dirty flag for some of the files.
                                                  • #1596787: glusterfs rpc-clnt.c: error returned while attempting to connect to host: (null), port 0
                                                  • #1622665: clang-scan report: glusterfs issues
                                                  • #1624701: error-out {inode,entry}lk fops with all-zero lk-owner
                                                  • #1628194: tests/dht: Additional tests for dht operations
                                                  • #1633930: ASan (address sanitizer) fixes - Blanket bug
                                                  • #1634664: Inconsistent quorum checks during open and fd based operations
                                                  • #1635688: Keep only the valid (maintained/supported) components in the build
                                                  • #1642168: changes to cloudsync xlator
                                                  • #1642810: remove glupy from code and build
                                                  • #1648169: Fuse mount would crash if features.encryption is on in the version from 3.13.0 to 4.1.5
                                                  • #1648768: Tracker bug for all leases related issues
                                                  • #1650095: Regression tests for geo-replication on EC volume is not available. It should be added.
                                                  • #1651246: Failed to dispatch handler
                                                  • #1651439: gluster-NFS crash while expanding volume
                                                  • #1651445: [RFE] storage.reserve option should take size of disk as input instead of percentage
                                                  • #1652887: Geo-rep help looks to have a typo.
                                                  • #1654021: Gluster volume heal causes continuous info logging of "invalid argument"
                                                  • #1654270: glusterd crashed with seg fault possibly during node reboot while volume creates and deletes were happening
                                                  • #1659334: FUSE mount seems to be hung and not accessible
                                                  • #1659708: Optimize by not stopping (restarting) the self-heal daemon (shd) when a volume is stopped, unless it is the last volume
                                                  • #1664934: glusterfs-fuse client not benefiting from page cache on read after write
                                                  • #1670031: performance regression seen with smallfile workload tests
                                                  • #1672480: Bugs Test Module tests failing on s390x
                                                  • #1672711: Upgrade from glusterfs 3.12 to gluster 4/5 broken
                                                  • #1672727: Fix timeouts so the tests pass on AWS
                                                  • #1672851: With parallel-readdir enabled, deleting a directory containing stale linkto files fails with "Directory not empty"
                                                  • #1674389: [thin arbiter] : rpm - add thin-arbiter package
                                                  • #1674406: glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
                                                  • #1674412: listing a file while writing to it causes deadlock
                                                  • #1675076: [posix]: log the actual path wherever possible
                                                  • #1676400: rm -rf fails with "Directory not empty"
                                                  • #1676430: distribute: Perf regression in mkdir path
                                                  • #1676736: tests: ./tests/bugs/distribute/bug-1161311.t times out
                                                  • #1676797: server xlator doesn't handle dict unserialization failures correctly
                                                  • #1677559: gNFS crashed when processing "gluster v profile [vol] info nfs"
                                                  • #1678726: Integer Overflow possible in md-cache.c due to data type inconsistency
                                                  • #1679401: Geo-rep setup creates an incorrectly formatted authorized_keys file
                                                  • #1679406: glustereventsd does not start on Ubuntu 16.04 LTS
                                                  • #1680587: Building RPM packages with _for_fedora_koji_builds enabled fails on el6
                                                  • #1683352: remove experimental xlators informations from glusterd-volume-set.c
                                                  • #1683594: nfs ltp ftest* fstat gets mismatch size as except after turn on md-cache
                                                  • #1683816: Memory leak when peer detach fails
                                                  • #1684385: [ovirt-gluster] Rolling gluster upgrade from 3.12.5 to 5.3 led to shard on-disk xattrs disappearing
                                                  • #1684404: Multiple shd processes are running in a brick_mux environment
                                                  • #1685027: Error handling in /usr/sbin/gluster-eventsapi produces IndexError: tuple index out of range
                                                  • #1685120: upgrade from 3.12, 4.1 and 5 to 6 broken
                                                  • #1685414: glusterd memory usage grows at 98 MB/h while running "gluster v profile" in a loop
                                                  • #1685944: WORM-XLator: Maybe integer overflow when computing new atime
                                                  • #1686371: Cleanup nigel access and document it
                                                  • #1686398: Thin-arbiter minor fixes
                                                  • #1686568: [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
                                                  • #1686711: [Thin-arbiter] : send correct error code in case of failure
                                                  • #1687326: [RFE] Revoke access from nodes using Certificate Revoke List in SSL
                                                  • #1687705: Brick process has coredumped, when starting glusterd
                                                  • #1687811: core dump generated while running the test ./tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t
                                                  • #1688068: Proper error message needed for FUSE mount failure when /var is filled.
                                                  • #1688106: Remove implementation of number of files opened in posix xlator
                                                  • #1688116: Spurious failure in test ./tests/bugs/glusterfs/bug-844688.t
                                                  • #1688287: ganesha crash on glusterfs with shard volume
                                                  • #1689097: gfapi: provide an option for changing statedump path in glfs-api.
                                                  • #1689799: [cluster/ec] : Fix handling of heal info cases without locks
                                                  • #1689920: lots of "Matching lock not found for unlock xxx" when using disperse (ec) xlator
                                                  • #1690753: Volume stop when quorum not met is successful
                                                  • #1691164: glusterd leaking memory when issued gluster vol status all tasks continuously
                                                  • #1691616: client log flooding with intentional socket shutdown message when a brick is down
                                                  • #1692093: Network throughput usage increased x5
                                                  • #1692612: Locking issue when restarting bricks
                                                  • #1692666: ssh-port config set is failing
                                                  • #1693575: gfapi: do not block epoll thread for upcall notifications
                                                  • #1693648: Geo-rep: Geo-replication failing with "cannot allocate memory"
                                                  • #1693692: Increase code coverage from regression tests
                                                  • #1694820: Geo-rep: Data inconsistency while syncing heavy renames with constant destination name
                                                  • #1694925: GF_LOG_OCCASSIONALLY API doesn't log at first instance
                                                  • #1695327: regression test fails with brick mux enabled.
                                                  • #1696046: Log level changes do not take effect until the process is restarted
                                                  • #1696077: Add pause and resume test case for geo-rep
                                                  • #1696136: gluster fuse mount crashed, when deleting 2T image file from oVirt Manager UI
                                                  • #1696512: glusterfs build is failing on rhel-6
                                                  • #1696599: Fops hang when inodelk fails on the first fop
                                                  • #1697316: Getting SEEK-2 and SEEK7 errors with [Invalid argument] in the bricks' logs
                                                  • #1697486: bug-1650403.t && bug-858215.t are throwing error "No such file" when accessing the glustershd pidfile
                                                  • #1697866: Provide a way to detach a failed node
                                                  • #1697907: ctime feature breaks old client to connect to new server
                                                  • #1697930: Thin-Arbiter SHD minor fixes
                                                  • #1698078: ctime: Creation of tar file on gluster mount throws warning "file changed as we read it"
                                                  • #1698449: thin-arbiter lock release fixes
                                                  • #1699025: Brick is not able to detach successfully in brick_mux environment
                                                  • #1699176: rebalance start command doesn't throw up error message if the command fails
                                                  • #1699189: fix truncate lock to cover the write in truncate clean
                                                  • #1699339: With 1800+ vol and simultaneous 2 gluster pod restarts, running gluster commands gives issues once all pods are up
                                                  • #1699394: [geo-rep]: Geo-rep goes FAULTY with OSError
                                                  • #1699866: I/O error on writes to a disperse volume when replace-brick is executed
                                                  • #1700078: disable + re-enable of bitrot leads to files marked as bad
                                                  • #1700865: FUSE mount seems to be hung and not accessible
                                                  • #1701337: issues with 'building' glusterfs packages if we do 'git clone --depth 1'
                                                  • #1701457: ctime: Logs are flooded with "posix set mdata failed, No ctime" error during open
                                                  • #1702131: The source file is left in EC volume after rename when glusterfsd out of service
                                                  • #1702185: coredump reported by test ./tests/bugs/glusterd/bug-1699339.t
                                                  • #1702299: Custom xattrs are not healed on newly added brick
                                                  • #1702303: Enable fips-mode-rchecksum for new volumes by default
                                                  • #1702952: remove tier related information from manual pages
                                                  • #1703020: The cluster.heal-timeout option is unavailable for ec volume
                                                  • #1703629: statedump is not capturing info related to glusterd
                                                  • #1703948: Self-heal daemon resources are not cleaned properly after a ec fini
                                                  • #1704252: Creation of bulkvoldict thread logic is not correct while brick_mux is enabled for single volume
                                                  • #1704888: delete the snapshots and volume at the end of uss.t
                                                  • #1705865: VM stuck in a shutdown because of a pending fuse request
                                                  • #1705884: Image size as reported from the fuse mount is incorrect
                                                  • #1706603: Glusterfsd crashing in ec-inode-write.c, in GF_ASSERT
                                                  • #1707081: Self heal daemon not coming up after upgrade to glusterfs-6.0-2 (intermittently) on a brick mux setup
                                                  • #1707700: maintain consistent values across for options when fetched at cluster level or volume level
                                                  • #1707728: geo-rep: Sync hangs with tarssh as sync-engine
                                                  • #1707742: tests/geo-rep: arequal checksum comparison always succeeds
                                                  • #1707746: AFR-v2 does not log before attempting data self-heal
                                                  • #1708051: Capture memory consumption for gluster process at the time of throwing no memory available message
                                                  • #1708156: ec ignores lock contention notifications for partially acquired locks
                                                  • #1708163: tests: fix bug-1319374.c compile warnings.
                                                  • #1708926: Invalid memory access while executing cleanup_and_exit
                                                  • #1708929: Add more test coverage for shd mux
                                                  • #1709248: [geo-rep]: Non-root - Unable to set up mountbroker root directory and group
                                                  • #1709653: geo-rep: With heavy rename workload the geo-rep log is flooded
                                                  • #1710054: Optimize the glustershd manager to send reconfigure
                                                  • #1710159: glusterd: While upgrading (3-node cluster) 'gluster v status' times out on node to be upgraded
                                                  • #1711240: [GNFS] gf_nfs_mt_inode_ctx serious memory leak
                                                  • #1711250: bulkvoldict thread is not handling all volumes while brick multiplex is enabled
                                                  • #1711297: Optimize glusterd code to copy dictionary in handshake code path
                                                  • #1711764: Files inaccessible if one rebalance process is killed in a multinode volume
                                                  • #1711820: Typo in cli return string.
                                                  • #1711827: test case bug-1399598-uss-with-ssl.t is generating crash
                                                  • #1712322: Brick logs inundated with [2019-04-27 22:14:53.378047] I [dict.c:541:dict_get] (-->/usr/lib64/glusterfs/6.0/xlator/features/worm.so(+0x7241) [0x7fe857bb3241] -->/usr/lib64/glusterfs/6.0/xlator/features/locks.so(+0x1c219) [0x7fe857dda219] [Invalid argumen
                                                  • #1712668: Remove-brick shows warning cluster.force-migration enabled where as cluster.force-migration is disabled on the volume
                                                  • #1712741: glusterd_svcs_stop should call individual wrapper function to stop rather than calling the glusterd_svc_stop
                                                  • #1713730: Failure when glusterd is configured to bind to a specific IPv6 address. If bind-address is IPv6, *addr_len will be non-zero and it goes to the ret = -1 branch, which will eventually cause a listen failure
                                                  • #1714098: Make debugging hung frames easier
                                                  • #1714415: Script to make it easier to find hung frames
                                                  • #1714973: upgrade after tier code removal results in peer rejection.
                                                  • #1715921: uss.t tests times out with brick-mux regression
                                                  • #1716695: Fix memory leaks that are present even after an xlator fini [client side xlator]
                                                  • #1716766: [Thin-arbiter] TA process is not picking 24007 as port while starting up
                                                  • #1716812: Failed to create volume whose transport_type is "tcp,rdma"
                                                  • #1716830: DHT: directory permissions are wiped out
                                                  • #1717757: WORM: Segmentation Fault if bitrot stub do signature
                                                  • #1717782: gluster v get all still showing storage.fips-mode-rchecksum off
                                                  • #1717819: Changes to self-heal logic w.r.t. detecting metadata split-brains
                                                  • #1717953: SELinux context labels are missing for newly added bricks using add-brick command
                                                  • #1718191: Regression: Intermittent test failure for quick-read-with-upcall.t
                                                  • #1718273: markdown formatting errors in files present under /doc directory of the project
                                                  • #1718316: Ganesha-gfapi logs are flooded with error messages related to "gf_uuid_is_null(gfid)) [Invalid argument]" when lookups are running from multiple clients
                                                  • #1718338: Upcall: Avoid sending upcalls for invalid Inode
                                                  • #1718848: False positive logging of mount failure
                                                  • #1718998: Fix test case "tests/basic/afr/split-brain-favorite-child-policy.t" failure
                                                  • #1720201: Healing not proceeding during in-service upgrade on a disperse volume
                                                  • #1720290: ctime changes: tar still complains file changed as we read it if uss is enabled
                                                  • #1720615: [RHEL-8.1] yum update fails for rhel-8 glusterfs client packages 6.0-5.el8
                                                  • #1720993: tests/features/subdir-mount.t is failing for brick_mux regression
                                                  • #1721385: glusterfs-libs: usage of inet_addr() may impact IPv6
                                                  • #1721435: DHT: Internal xattrs visible on the mount
                                                  • #1721441: geo-rep: Fix permissions for GEOREP_DIR in non-root setup
                                                  • #1721601: [SHD] : logs of one volume are going to log file of other volume
                                                  • #1722541: stale shd process files leading to heal timing out and heal daemon not coming up for all volumes
                                                  • #1703322: Need to document about fips-mode-rchecksum in gluster-7 release notes.
                                                  • #1722802: Incorrect power of two calculation in mem_pool_get_fn
                                                  • #1723890: Crash in glusterd when running test script bug-1699339.t
                                                  • #1728770: Failures in remove-brick due to [Input/output error] errors
                                                  • #1736481: capture stat failure error while setting the gfid
                                                  • #1739424: Disperse volume : data corruption with ftruncate data in 4+2 config
                                                  • #1739426: Open fd heal should filter O_APPEND/O_EXCL
                                                  • #1739427: An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
                                                  • #1741041: atime/mtime is not restored after healing for entry self heals
                                                  • #1743200: ./tests/bugs/glusterd/bug-1595320.t is failing
                                                  • #1744874: interrupts leak memory
                                                  • #1745422: ./tests/bugs/glusterd/bug-1595320.t is failing
                                                  • #1745914: ESTALE change in fuse breaks get_real_filename implementation
                                                  • #1746142: ctime: If atime is updated via utimensat syscall ctime is not getting updated
                                                  • #1746145: CentOs 6 GlusterFS client creates files with time 01/01/1970
                                                  • #1747301: Setting cluster.heal-timeout requires volume restart
                                                  • #1747746: The result (hostname) of getnameinfo is the same for all bricks (IPv6 addresses), while the addresses are not.
                                                  • #1748448: syncop: Bail out if frame creation fails
                                                  • #1748774: Incorrect power of two calculation in mem_pool_get_fn
                                                  • #1749155: bug-1402841.t-mt-dir-scan-race.t fails spuriously
                                                  • #1749305: Failures in remove-brick due to [Input/output error] errors
                                                  • #1749664: The result (hostname) of getnameinfo is the same for all bricks (IPv6 addresses), while the addresses are not.
                                                  • #1751556: syncop: Bail out if frame creation fails
                                                  • #1752245: Crash in glusterd when running test script bug-1699339.t
                                                  • #1752429: Ctime: Cannot see the "trusted.glusterfs.mdata" xattr for directory on a new brick after rebalance
                                                  • #1755212: geo-rep: performance improvement while syncing heavy renames with existing destination
                                                  • #1755213: geo-rep: non-root session going faulty due to improper sub-command
                                                  • #1755678: Segmentation fault occurs while truncating a file
                                                  • #1756002: git clone fails on gluster volumes exported via nfs-ganesha
                                                  • "},{"location":"release-notes/7.1/","title":"Release notes for Gluster 7.1","text":"

                                                    This is a bugfix release. The release notes for 7.0 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                    NOTE: Next minor release tentative date: Week of 20th Jan, 2020

                                                    "},{"location":"release-notes/7.1/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                    None

                                                    "},{"location":"release-notes/7.1/#major-issues","title":"Major issues","text":"

                                                    None

                                                    "},{"location":"release-notes/7.1/#note","title":"Note","text":""},{"location":"release-notes/7.1/#glusterfs-samba","title":"GlusterFS-Samba","text":"

                                                    The following parameters will be added to the GlusterFS volume share section (if not already present) in smb.conf when the user.smb or user.cifs option is set on a volume:

                                                    kernel share modes = no

                                                    The following parameters will NOT be added to the GlusterFS volume share section (if not already present) in smb.conf when the user.smb or user.cifs option is set on a volume:

                                                    guest ok = yes
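
                                                    As a rough illustration (the volume name, the share-section name, and the grep invocation below are assumptions made for this example, not output documented by this release), setting the option and then inspecting smb.conf might look like:

                                                        gluster volume set myvol user.smb enable
                                                        # The hook-managed share section (assumed here to be named [gluster-myvol])
                                                        # is expected to contain, among other settings:
                                                        #   kernel share modes = no
                                                        # and, from this release onwards, should no longer contain:
                                                        #   guest ok = yes
                                                        grep -A 8 'gluster-myvol' /etc/samba/smb.conf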

                                                    "},{"location":"release-notes/7.1/#bugs-addressed","title":"Bugs addressed","text":"

                                                    Bugs addressed since release-7.0 are listed below.

                                                    • #1760356: packaging: remove leftover bd cruft in rpm .spec.in
                                                    • #1760699: glustershd cannot decide healed_sinks and skips repair, so some entries linger in volume heal info
                                                    • #1760791: afr: support split-brain CLI for replica 3
                                                    • #1761910: Rebalance causing IO Error - File descriptor in bad state
                                                    • #1764003: [Upgrade] Config files are not upgraded to new version
                                                    • #1764007: geo-replication sessions going faulty
                                                    • #1764015: geo-rep syncing significantly behind and also only one of the directories is synced, with tracebacks seen
                                                    • #1764023: geo-rep: Changelog archive file format is incorrect
                                                    • #1764026: tests/geo-rep: Add test case to validate non-root geo-replication setup
                                                    • #1764028: [geo-rep] sync_method showing rsync instead of tarssh post in-service upgrade
                                                    • #1764030: [GSS] geo-rep entering into faulty state with OSError: [Errno 13] Permission denied
                                                    • #1765431: test: fix non-root geo-rep test case
                                                    • #1766424: cgroup control-cpu-load.sh script not working
                                                    • #1768742: Memory leak in glusterfsd process
                                                    • #1768760: tests/bugs/shard/unlinks-and-renames.t fails on RHEL8
                                                    • #1769315: Rebalance is causing glusterfs crash on client node
                                                    • #1769320: Spurious failure tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
                                                    • #1771840: [CENTOS 6] Geo-replication session not starting after creation
                                                    • #1775495: [GNFS] showmount -a causes gnfs crash
                                                    • #1777769: auth-allow of IPv4 address doesn't take netmask into consideration
                                                    • #1778175: glusterfsd crashed with "'MemoryError' Cannot access memory at address"
                                                    • #1781483: Remove guest access by default for GlusterFS volume SMB shares added by hook scripts
                                                    • #1781486: gluster-smb:glusto-test access gluster by cifs test write report Device or resource busy
                                                    • #1782826: event_slot_alloc not able to return an index after the slot_used count reaches 1024
                                                    • #1783227: GlusterFS brick process crash
                                                    • #1783858: Heal Info is hung when I/O is in progress on a gluster block volume
                                                    • #1784790: tests/00-geo-rep/00-georep-verify-non-root-setup.t fail on freshly installed builder
                                                    • #1785228: Windows client fails to copy large file to GlusterFS volume share with fruit and streams_xattr VFS modules via Samba
                                                    • #1785493: READDIRP incorrectly updates posix-acl inode ctx
                                                    • "},{"location":"release-notes/7.2/","title":"Release notes for Gluster 7.2","text":"

                                                      This is a bugfix release. The release notes for 7.0 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                      NOTE: Next minor release tentative date: Week of 20th Feb, 2020

                                                      "},{"location":"release-notes/7.2/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                      None

                                                      "},{"location":"release-notes/7.2/#major-issues","title":"Major issues","text":"

                                                      None

                                                      "},{"location":"release-notes/7.2/#bugs-addressed","title":"Bugs addressed","text":"

                                                      Bugs addressed since release-7.1 are listed below.

                                                      • #1767264: glusterfs client process coredump
                                                      • #1786753: Functionality to enable log rotation for user serviceable snapshot's logs.
                                                      • #1788785: Unable to set/modify optimistic-change-log for replicate volumes
                                                      • #1789336: glusterfs process memory leak in ior test
                                                      • #1790423: Glusterfind pre command fails
                                                      • #1790428: glusterfind pre output file is empty
                                                      • #1790438: S57glusterfind-delete-post.py not python3 ready (does not decode bytestring)
                                                      • #1790846: Remove extra argument
                                                      "},{"location":"release-notes/7.3/","title":"Release notes for Gluster 7.3","text":"

                                                      This is a bugfix release. The release notes for 7.0 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                      NOTE: Next minor release tentative date: Week of 20th Mar, 2020

                                                      "},{"location":"release-notes/7.3/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                      Features

                                                      Make thin-arbiter name unique in 'pending-xattr' option. By making this unique, we can host single thin-arbiter node for multiple clusters.
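
                                                      For context, a minimal sketch of creating a volume that uses a shared thin-arbiter node follows; the hostnames, brick paths and volume name are placeholders, and the Thin Arbiter section of the Administrator Guide remains the authoritative reference for the syntax:

                                                          # Sketch only: a 2-way replica backed by a thin-arbiter brick. With unique
                                                          # pending-xattr names, the same thin-arbiter host can serve volumes that
                                                          # belong to different clusters.
                                                          gluster volume create myvol replica 2 thin-arbiter 1 \
                                                              server1:/bricks/myvol server2:/bricks/myvol ta-host:/bricks/ta
                                                          gluster volume start myvol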

                                                      "},{"location":"release-notes/7.3/#major-issues","title":"Major issues","text":"

                                                      None

                                                      "},{"location":"release-notes/7.3/#bugs-addressed","title":"Bugs addressed","text":"

                                                      Bugs addressed since release-7.2 are listed below.

                                                      • #1768407: glusterfsd memory leak observed after enable tls
                                                      • #1791154: xlators/features/quota/src/quota.c:quota_log_usage
                                                      • #1793085: gf_event doesn't work for glfsheal process
                                                      • #1793412: config ssh-port can accept negative values and values outside the allowed port range
                                                      • #1793492: cli: duplicate defns of cli_default_conn_timeout and cli_ten_minutes_timeout
                                                      • #1794019: Mounts fail after a reboot of 1 of 3 gluster nodes
                                                      • #1795540: mem leak while using gluster tools
                                                      • #1802449: spurious self-heald.t failure
                                                      "},{"location":"release-notes/7.4/","title":"Release notes for Gluster 7.4","text":"

                                                      This is a bugfix release. The release notes for 7.0 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                      NOTE: Next minor release tentative date: Week of 20th Apr, 2020

                                                      "},{"location":"release-notes/7.4/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                      None

                                                      "},{"location":"release-notes/7.4/#major-issues","title":"Major issues","text":"

                                                      None

                                                      "},{"location":"release-notes/7.4/#bugs-addressed","title":"Bugs addressed","text":"

                                                      Bugs addressed since release-7.3 are listed below.

                                                      • #1785323: glusterfsd crashes after a few seconds
                                                      • #1804591: Heal pending on volume, even after all the bricks are up
                                                      • #1805668: Memory corruption when glfs_init() is called after glfs_fini()
                                                      • #1806843: Disperse volume : Ganesha crash with IO in 4+2 config when one glusterfsd restarts every 600s
                                                      • #1807785: seeing error message in glustershd.log on volume start (or maybe as part of shd graph regeneration): inet_pton failed with return code 0 [Invalid argument]
                                                      • #1808964: Set volume option when one of the nodes is powered off; after powering on the node, brick processes are offline
                                                      • #1809438: [brickmux]: glustershd crashed when rebooting 1/3 nodes at regular intervals
                                                      • #1812849: Setting volume option when one of the glusterds is stopped in the cluster, post glusterd restart seeing couldn't find vol info in glusterd logs and shd, brick process offline
                                                      • #1061: [EC] shd crashed while heal failed due to out of memory error.
                                                      • #1030: Memory corruption when sending events to an IPv6 host
                                                      "},{"location":"release-notes/7.5/","title":"Release notes for Gluster 7.5","text":"

                                                      This is a bugfix release. The release notes for 7.0 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                      NOTE: Next minor release tentative date: Week of 20th May, 2020

                                                      "},{"location":"release-notes/7.5/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                      None

                                                      "},{"location":"release-notes/7.5/#major-issues","title":"Major issues","text":"

                                                      None

                                                      "},{"location":"release-notes/7.5/#bugs-addressed","title":"Bugs addressed","text":"

                                                      Bugs addressed since release-7.4 are listed below.

                                                      • #832 Permission Denied in logs
                                                      • #884 [bug:1808688] Data corruption with asynchronous writes
                                                      • #1067 [bug:1661889] Metadata heal picks different brick each time as source if there are no pending xattrs.
                                                      • #1127 Mount crash during background shard cleanup
                                                      • #1134 snap_scheduler.py init failing with "TypeError: Can't mix strings and bytes in path components"
                                                      • #1152 Spurious failure of tests/bugs/protocol/bug-1433815-auth-allow.t
                                                      • #1168 glusterfsd crash due to health-check failure, going down, system call errno not returned
                                                      "},{"location":"release-notes/7.6/","title":"Release notes for Gluster 7.6","text":"

                                                      This is a bugfix release. The release notes for 7.0, 7.1, 7.2, 7.3, 7.4 and 7.5 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                      NOTE: Next minor release tentative date: Week of 20th Jul, 2020

                                                      "},{"location":"release-notes/7.6/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                      None

                                                      "},{"location":"release-notes/7.6/#major-issues","title":"Major issues","text":"

                                                      None

                                                      "},{"location":"release-notes/7.6/#bugs-addressed","title":"Bugs addressed","text":"

                                                      Bugs addressed since release-7.5 are listed below.

                                                      • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                                                      • #1140 getfattr returns ENOATTR for system.posix_acl_access on dispe...
                                                      • #1146 gfapi/Upcall: Potential deadlock in synctask threads processi...
                                                      • #1179 gnfs split brain when 1 server in 3x1 down (high load)
                                                      • #1000 [bug:1193929] GlusterFS can be improved
                                                      "},{"location":"release-notes/7.7/","title":"Release notes for Gluster 7.7","text":"

                                                      This is a bugfix release. The release notes for 7.0, 7.1, 7.2, 7.3, 7.4 7.5 and 7.6 contains a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                      NOTE: Next minor release tentative date: Week of 20th Sep, 2020

                                                      "},{"location":"release-notes/7.7/#major-changes-features-and-limitations-addressed-in-this-release","title":"Major changes, features and limitations addressed in this release","text":"

                                                      None

                                                      "},{"location":"release-notes/7.7/#major-issues","title":"Major issues","text":"

                                                      None

                                                      "},{"location":"release-notes/7.7/#bugs-addressed","title":"Bugs addressed","text":"

                                                      Bugs addressed since release-7.6 are listed below.

                                                      • #1000 [bug:1193929] GlusterFS can be improved
                                                      • #1220 cluster/ec: return correct error code and log the message in ...
                                                      • #1223 Failure of tests/basic/gfapi/gfapi-copy-file-range.t
                                                      • #1225 fuse causes glusterd to dump core
                                                      • #1243 Modify and return iatt (especially size and block-count) in s...
                                                      • #1254 Prioritize ENOSPC over other lesser priority errors
                                                      • #1296 Implement seek in open-behind
                                                      • #1303 Failures in rebalance due to [Input/output error]
                                                      • #1348 Fuse mount crashes in shard translator when truncating a *real...
                                                      "},{"location":"release-notes/7.8/","title":"Release notes for Gluster 7.8","text":"

                                                      This is a bugfix release. The release notes for 7.0, 7.1, 7.2, 7.3, 7.4 7.5, 7.6 and 7.7 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                      NOTE:

                                                      1. Next minor release tentative date: Week of 30th Nov, 2020.
                                                      2. The next minor release will be the last release of the release-7.x series.
                                                      "},{"location":"release-notes/7.8/#highlights-of-release","title":"Highlights of Release","text":"

                                                      This release contains majorly the bug fixes as described in the issues section.

                                                      "},{"location":"release-notes/7.8/#builds-are-available-at","title":"Builds are available at","text":"

                                                      https://download.gluster.org/pub/gluster/glusterfs/7/7.8/

                                                      "},{"location":"release-notes/7.8/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"

                                                      Please find the list of issues added to this release below.

                                                      • #763 thin-arbiter: Testing report
                                                      • #1000 [bug:1193929] GlusterFS can be improved
                                                      • #1002 [bug:1679998] GlusterFS can be improved
                                                      • #1250 geo-rep: Fix corner case in rename on mkdir during hybrid crawl
                                                      • #1253 On an oVirt setup glusterfs performs poorly
                                                      • #1332 Unable to Upgrade to Gluster 7 from Earlier Version
                                                      • #1351 issue with gf_fill_iatt_for_dirent()
                                                      • #1354 High CPU utilization by self-heal on disperse volumes with no ...
                                                      • #1385 High CPU utilization by self-heal on disperse volumes when an ...
                                                      • #1407 glusterd keep crashing when upgrading from 6.5 to 7.7
                                                      • #1438 syncdaemon/syncdutils.py: SyntaxWarning: "is" with a literal. ...
                                                      • #1440 glusterfs 7.7 fuse client memory leak
                                                      • #1472 Readdir-ahead leads to inconsistent ls results
                                                      "},{"location":"release-notes/7.9/","title":"Release notes for Gluster 7.9","text":"

                                                      This is a bugfix release. The release notes for 7.0, 7.1, 7.2, 7.3, 7.4 7.5, 7.6, 7.7 and 7.8 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 7 stable release.

                                                      NOTE:

                                                      This release is the last release of the release-7.x series. Users are highly encouraged to upgrade to newer releases of GlusterFS.

                                                      "},{"location":"release-notes/7.9/#highlights-of-release","title":"Highlights of Release","text":"

                                                      This release contains majorly the bug fixes as described in the issues section.

                                                      "},{"location":"release-notes/7.9/#builds-are-available-at","title":"Builds are available at","text":"

                                                      https://download.gluster.org/pub/gluster/glusterfs/7/7.9/

                                                      "},{"location":"release-notes/7.9/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"

                                                      Please find the list of issues added to this release below.

                                                      • #1852 glusterd: Can't run rebalance due to long unix socket
                                                      • #1836 posix: Update ret value in posix_get_gfid2path if GF_MALLOC fails
                                                      • #1738 [cli] Improper error message on command timeout
                                                      • #1699 One brick offline with signal received: 11
                                                      • #1604 rfc.sh on release-7 needs to move to github flow
                                                      • #1499 why not use JumpConsistentHash to replace SuperFastHash to cho...
                                                      • #1221 features/bit-rot: invalid snprintf() buffer size
                                                      • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                                                      "},{"location":"release-notes/8.0/","title":"Release notes for Gluster 8.0","text":"

                                                      Release date: 09-July-2020

                                                      This is a major release that includes a range of features, code improvements and stability fixes as noted below.

                                                      A selection of the key features and changes is documented on this page. A full list of bugs that have been addressed is included further below.

                                                      • Release notes for Gluster 8.0
                                                      • Announcements
                                                      • Major changes and features
                                                        • Highlights
                                                        • Features
                                                      • Major issues
                                                      • Bugs addressed
                                                      "},{"location":"release-notes/8.0/#announcements","title":"Announcements","text":"
                                                      1. Releases that receive maintenance updates post release 8 are, 7 and 8 (reference)

                                                      2. Release 8 will receive maintenance updates around the 10th of every month for the first 3 months post release (i.e Aug'20, Sep'20, Oct'20). Post the initial 3 months, it will receive maintenance updates every 2 months till EOL.

                                                      "},{"location":"release-notes/8.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/8.0/#highlights","title":"Highlights","text":"
                                                      • Several stability fixes addressing
                                                      • coverity, clang-scan, address sanitizer and valgrind reported issues
                                                      • removal of unused and hence, deprecated code and features
                                                      • Performance Improvements
                                                      • CentOS 8 and RHEL 8 is supported
                                                      "},{"location":"release-notes/8.0/#features","title":"Features","text":"
                                                      • Implemented seek file operation for open-behind
                                                      • Now storage.reserve option will take size of disk as input instead of percentage
                                                      • Added Functionality to enable log rotation for user serviceable snapshot's logs
                                                      • Mandatory locks enhancements in replicate subvolumes
                                                      • To validate other memory allocation implementations instead of libc's malloc added an option to build with tcmalloc library
                                                      • Integrated Thin-arbiter with GD1
                                                      • Client Handling of Elastic Clusters
                                                      • The package glusterfs-libs is replaced by libgfchangelog0, libgfrpc0, libgfxdr0, and libglusterfs0; and additional libraries in libgfapi0, libglusterd0
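
                                                      As a rough sketch of the storage.reserve change noted above (the volume name is a placeholder, and the exact value syntax accepted should be confirmed against the installed version):

                                                          # Illustrative only: reserving an absolute size on the bricks instead of
                                                          # the older percentage form.
                                                          gluster volume set myvol storage.reserve 10GB
                                                          gluster volume get myvol storage.reserve
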
                                                      "},{"location":"release-notes/8.0/#major-issues","title":"Major issues","text":"

                                                      None

                                                      "},{"location":"release-notes/8.0/#bugs-addressed","title":"Bugs addressed","text":"

                                                      Bugs addressed since release-7 are listed below.

                                                      • #789278: Issues reported by Coverity static analysis tool
                                                      • #1158130: Not possible to disable fopen-keep-cache when mounting
                                                      • #1183054: rpmlint throws couple of errors for RPM spec file
                                                      • #1193929: GlusterFS can be improved
                                                      • #1387404: geo-rep: gsync-sync-gfid binary installed in /usr/share/...
                                                      • #1410439: glusterfind pre output file is empty
                                                      • #1423442: group files to set volume options should have comments
                                                      • #1430623: pthread mutexes and condition variables are not destroyed
                                                      • #1489610: glusterfind saves var data under $prefix instead of localstatedir
                                                      • #1507896: glfs_init returns incorrect errno on failure
                                                      • #1514683: Removal of bricks in volume isn't prevented if remaining brick doesn't contain all the files
                                                      • #1538900: Found a missing unref in rpc_clnt_reconnect
                                                      • #1554286: Xattr not updated if increasing the retention of a WORM/Retained file
                                                      • #1593542: ctime: Upgrade/Enabling ctime feature wrongly updates older files with latest {a|m|c}time
                                                      • #1620580: Deleted a volume and created a new volume with similar but not the same name. The kubernetes pod still keeps on running and doesn't crash. Still possible to write to gluster mount
                                                      • #1622665: clang-scan report: glusterfs issues
                                                      • #1626543: dht/tests: Create a .t to test all possible combinations for file rename
                                                      • #1635688: Keep only the valid (maintained/supported) components in the build
                                                      • #1636297: Make it easy to build / host a project which just builds glusterfs translator
                                                      • #1644322: flooding log with "glusterfs-fuse: read from /dev/fuse returned -1 (Operation not permitted)"
                                                      • #1651445: [RFE] storage.reserve option should take size of disk as input instead of percentage
                                                      • #1664335: [geo-rep]: Transport endpoint not connected with arbiter volumes
                                                      • #1665358: allow regression to not run tests with nfs, if nfs is disabled.
                                                      • #1668239: [man page] Gluster(8) - Missing disperse-data parameter in the Gluster Console Manager man page
                                                      • #1668286: READDIRP incorrectly updates posix-acl inode ctx
                                                      • #1676479: read-ahead and io-cache degrading performance on sequential read
                                                      • #1688115: Data heal not checking for locks on source & sink(s) before healing
                                                      • #1689097: gfapi: provide an option for changing statedump path in glfs-api.
                                                      • #1690454: mount-shared-storage.sh does not implement mount options
                                                      • #1693692: Increase code coverage from regression tests
                                                      • #1694920: Inconsistent locking in presence of disconnects
                                                      • #1697293: DHT: print hash and layout values in hexadecimal format in the logs
                                                      • #1698042: quick-read cache invalidation feature has the same key as md-cache
                                                      • #1707731: [Upgrade] Config files are not upgraded to new version
                                                      • #1708603: [geo-rep]: Note section in document is required for ignore_deletes true config option where it might delete a file
                                                      • #1708929: Add more test coverage for shd mux
                                                      • #1716695: Fix memory leaks that are present even after an xlator fini [client side xlator]
                                                      • #1716979: Multiple disconnect events being propagated for the same child
                                                      • #1717754: Enable features.locks-notify-contention by default
                                                      • #1717824: Fencing: Added the tcmu-runner ALUA feature support, but after one of the nodes is rebooted the glfs_file_lock() gets stuck
                                                      • #1717827: tests/geo-rep: Add test case to validate non-root geo-replication setup
                                                      • #1719290: Glusterfs mount helper script not working with IPv6 because the regular expression or man page is wrong
                                                      • #1720463: [Thin-arbiter] : Wait for connection with TA node before sending lookup/create of ta-replica id file
                                                      • #1720566: Can't rebalance GlusterFS volume because unix socket's path name is too long
                                                      • #1721590: tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t is failing
                                                      • #1721686: Remove usage of obsolete function usleep()
                                                      • #1722507: Incorrect reporting of type/gfid mismatch
                                                      • #1722541: stale shd process files leading to heal timing out and heal daemon not coming up for all volumes
                                                      • #1722546: do not assert in inode_unref if the inode table cleanup has started
                                                      • #1722598: dump the min and max latency of each xlator in statedump
                                                      • #1722698: DHT: severe memory leak in dht rename
                                                      • #1722740: [GSS] geo-replication sessions going faulty
                                                      • #1722802: Incorrect power of two calculation in mem_pool_get_fn
                                                      • #1722977: ESTALE change in fuse breaks get_real_filename implementation
• #1723280: windows cannot access mountpoint exported from a disperse volume
                                                      • #1723402: Brick multiplexing is not working.
                                                      • #1723455: volume set group description missing space leading to words being merged in help output
                                                      • #1723658: [In-service] Post upgrade glusterd is crashing with a backtrace on the upgraded node while issuing gluster volume status from non-upgraded nodes
                                                      • #1723761: [Ganesha]: truncate operation not updating the ctime
                                                      • #1723890: Crash in glusterd when running test script bug-1699339.t
                                                      • #1724024: use more secure mode for mkdir operations
                                                      • #1724184: Thin-arbiter: SHD takes lock and inspects the state on TA during every index crawl
                                                      • #1725034: gluster volume help showing multiple commands for top instead of one.
                                                      • #1725211: User serviceable snapshots (USS) are not accessible after changing transport.socket.bind-address of glusterd
                                                      • #1726205: Windows client fails to copy large file to GlusterFS volume share with fruit and streams_xattr VFS modules via Samba
                                                      • #1726783: snapd crashes sometimes
                                                      • #1726906: get-state does not show correct brick status
                                                      • #1727068: Deadlock when generating statedumps
                                                      • #1727081: Disperse volume : data corruption with ftruncate data in 4+2 config
                                                      • #1727107: geo-replication/setup.py missing license details in setup()
• #1727248: [GNFS] showmount -a causes gnfs crash
                                                      • #1727256: Directory pending heal in heal info output
                                                      • #1727329: glustershd dumped core with seg fault at afr_has_quorum
                                                      • #1727852: gluster-block: improvements to volume group profile options list
                                                      • #1728047: interrupts leak memory
                                                      • #1728417: Cleanup references to Hadoop in code base
                                                      • #1728554: Spelling errors
                                                      • #1728683: [geo-rep] gluster-mountbroker missing a brief description of what the argument does in # gluster-mountbroker (add|remove|setup) --help
                                                      • #1728766: Volume start failed when shd is down in one of the node in cluster
                                                      • #1728770: Failures in remove-brick due to [Input/output error] errors
                                                      • #1729085: [EC] shd crashed while heal failed due to out of memory error.
                                                      • #1729107: Memory leak in glusterfsd process
                                                      • #1729463: gluster v geo-rep status command timing out
                                                      • #1729772: Disperse volume : Ganesha crash with IO in 4+2 config when one glusterfsd restart every 600s
                                                      • #1729847: Fix spurious failure of tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
• #1730175: Seeing failure due to "getxattr err for dir [No data available]" in rebalance
                                                      • #1730409: core file generated - when EC volume stop and start is executed for 10 loops on a EC+Brickmux setup
                                                      • #1730715: An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
                                                      • #1730953: mount generates errors after umount
                                                      • #1731920: [geo-rep]: gluster command not found while setting up a non-root session
                                                      • #1732496: [Coverity] RETURN_LOCAL in __nlc_inode_ctx_get()
                                                      • #1732717: fuse: Limit the number of inode invalidation requests in the queue
                                                      • #1733042: cluster.rc Create separate logdirs for each host instance
                                                      • #1733166: potential deadlock while processing callbacks in gfapi
                                                      • #1733425: Setting volume option when one of the glusterd is stopped in the cluster, post glusterd restart seeing couldn't find vol info in glusterd logs and shd, brick process offline
                                                      • #1733935: Open fd heal should filter O_APPEND/O_EXCL
• #1734026: Cannot see the "trusted.glusterfs.mdata" xattr for directory on a new brick after rebalance
                                                      • #1734252: Heal not completing after geo-rep session is stopped on EC volumes.
                                                      • #1734299: ctime: When healing ctime xattr for legacy files, if multiple clients access and modify the same file, the ctime might be updated incorrectly.
                                                      • #1734370: atime/mtime is not restored after healing for entry self heals
                                                      • #1734738: Unable to create geo-rep session on a non-root setup.
                                                      • #1736482: capture stat failure error while setting the gfid
                                                      • #1737288: nfs client gets bad ctime for copied file which is on glusterfs disperse volume with ctime on
                                                      • #1737291: features/locks: avoid use after freed of frame for blocked lock
                                                      • #1737484: geo-rep syncing significantly behind and also only one of the directories are synced with tracebacks seen
                                                      • #1737676: Upgrading a Gluster node fails when user edited glusterd.vol file exists
                                                      • #1737778: ocf resource agent for volumes don't work in non-standard environment
                                                      • #1738419: read() returns more than file size when using direct I/O
                                                      • #1738763: [EC] : fix coverity issue
                                                      • #1738786: ctime: If atime is updated via utimensat syscall ctime is not getting updated
                                                      • #1739360: [GNFS] gluster crash with nfs.nlm off
                                                      • #1740017: tests/bugs/replicate/bug-880898.t created a core file.
                                                      • #1741734: gluster-smb:glusto-test access gluster by cifs test write report Device or resource busy
                                                      • #1741779: Fix spelling errors
                                                      • #1741890: geo-rep: Changelog archive file format is incorrect
                                                      • #1743020: glusterd start is failed and throwing an error Address already in use
                                                      • #1743069: bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t fails in brick mux regression spuriously
                                                      • #1743094: glusterfs build fails on centos7
                                                      • #1743200: ./tests/bugs/glusterd/bug-1595320.t is failing
                                                      • #1743573: fuse client hung when issued a lookup \"ls\" on an ec volume
                                                      • #1743652: CentOs 6 GlusterFS client creates files with time 01/01/1970
                                                      • #1744519: log aio_error return codes in posix_fs_health_check
                                                      • #1744548: Setting cluster.heal-timeout requires volume restart
                                                      • #1745965: glusterd fails to start due to SIGABRT dumping core
                                                      • #1745967: File size was not truncated for all files when tried with rebalance in progress.
                                                      • #1746228: systemctl start glusterd is getting timed out on the scaled setup with 2000 volumes
                                                      • #1746320: SHORT-WRITE error leads to crash
                                                      • #1746810: markdown files containing 404 links
                                                      • #1747746: The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
                                                      • #1748448: syncop: Bail out if frame creation fails
                                                      • #1748744: bug-1402841.t-mt-dir-scan-race.t fails spuriously
                                                      • #1748836: Application should know when update size/version went bad
                                                      • #1749322: glustershd can not decide heald_sinks, and skip repair, so some entries lingering in volume heal info
• #1750387: Deprecated log rotate command still present in "# gluster v help"
                                                      • #1750618: Cleanup of executable in tests/bugs/gfapi/bug-1447266/bug-1447266.t not done
                                                      • #1751134: Spurious failure tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
                                                      • #1751907: bricks gone down unexpectedly
                                                      • #1752330: seeing error message in glustershd.log on volume start(or may be as part of shd graph regeneration) inet_pton failed with return code 0 [Invalid argument]
                                                      • #1752331: Test tests/basic/volume-scale-shd-mux.t is failing on upstream CI
                                                      • #1753569: git clone fails on gluster volumes exported via nfs-ganesha
                                                      • #1753592: Segmentation fault occurs while truncate file
                                                      • #1753843: [Disperse volume]: Regression in IO performance seen in sequential read for large file
                                                      • #1753857: geo-rep: performance improvement while syncing heavy renames with existing destination
                                                      • #1753859: Typos in glusterd log messages
                                                      • #1753880: Set the default lru-limit in fuse to a smaller number
• #1753928: geo-rep: non-root session going faulty due to improper sub-command
                                                      • #1754448: Re-alignment of Structure attributes
                                                      • #1754477: Thin-arbiter: Raise error in CLI if replica-count is not 2
• #1755344: glustershd.log getting flooded with "W [inode.c:1017:inode_find] (-->/usr/lib64/glusterfs/6.0/xlator/cluster/disperse.so(+0xe3f9) [0x7fd09b0543f9] -->/usr/lib64/glusterfs/6.0/xlator/cluster/disperse.so(+0xe19c) [0x7fd09b05419 TABLE NOT FOUND"
                                                      • #1755900: heketidbstorage bricks go down during PVC creation
                                                      • #1756211: tests/bugs/shard/bug-1272986.t fails
                                                      • #1756900: tests are failing in RHEL8 regression
                                                      • #1756938: afr: support split-brain CLI for replica 3
                                                      • #1757399: Rebalance is causing glusterfs crash on client node
                                                      • #1758579: Rebalance causing IO Error - File descriptor in bad state
                                                      • #1758878: # gluster v info --xml is always returning 3 for all Nx3 volumes
                                                      • #1758984: Enable direct-io options in group virt
                                                      • #1759002: Spurious failure tests/bugs/replicate/bug-1744548-heal-timeout.t
                                                      • #1759081: Spurious failure of /tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
                                                      • #1760187: Implement seek fop
                                                      • #1760189: Use replica aware seek fop
                                                      • #1760467: rebalance start is succeeding when quorum is not met
                                                      • #1761759: Failure in ./tests/basic/posix/shared-statfs.t
                                                      • #1761769: On some distros bug-1272986.t takes more than 2 minutes to run
                                                      • #1762220: [geo-rep] sync_method showing rsync instead of tarssh post in-service upgrade
                                                      • #1762438: DHT- gluster rebalance status shows wrong data size after rebalance is completed successfully
• #1763036: glusterfsd crashed with "'MemoryError' Cannot access memory at address"
                                                      • #1763439: [GSS] geo-rep entering into faulty state with OSError: [Errno 13] Permission denied
                                                      • #1764110: tests/bugs/shard/unlinks-and-renames.t fails on RHEL8
                                                      • #1764119: gluster rebalance status doesn't show detailed information when a node is rebooted
                                                      • #1764129: quota_fsck script KeyError: 'contri_size'
                                                      • #1764208: cgroup control-cpu-load.sh script not working
                                                      • #1764418: Add Mohit & Sanju as glusterd/cli maintainers
                                                      • #1765017: gf_event doesn't work for glfsheal process
                                                      • #1765155: replication shouldn't modify xattr-req coming from parent
                                                      • #1765186: Problematic coding practices at logger
                                                      • #1765421: DHT: Add comments to the code
                                                      • #1765426: test: fix non-root geo-rep test case
                                                      • #1765542: Add Sunny Kumar as co-maintainer of Geo-replication component
                                                      • #1768407: glusterfsd memory leak observed after enable tls
                                                      • #1768896: Long method in glusterfsd - set_fuse_mount_options(...)
• #1769712: check if graph is ready before processing cli command
                                                      • #1769754: dht_readdirp_cbk: Do not strip out entries with invalid stats
                                                      • #1771365: libglusterfs/dict.c : memory leaks
                                                      • #1771577: [RHEL 6] Geo-replication session not starting after creation
                                                      • #1771895: geo-rep: Improve debugging in log_raise_exception
                                                      • #1772006: NULL dict messages flooding fuse mount log
                                                      • #1773530: ctime value is different from atime/mtime on a create of file
                                                      • #1773856: Set volume option when one of the node is powered off, After powering the node brick processes are offline
                                                      • #1774011: Heal Info is hung when I/O is in progress on a gluster block volume
                                                      • #1774866: man page update needed for gluster volume top command
                                                      • #1775612: Remove guest access by default for GlusterFS volume SMB shares added by hook scripts
                                                      • #1776264: RFE: systemd should restart glusterd on crash
                                                      • #1776757: DHT - Reduce methods scope
                                                      • #1776784: glfsheal crash on unexpected volume name
                                                      • #1776801: Bricks are not available when volume create fails
                                                      • #1776892: [patch] .dirstamp should be in ignored
                                                      • #1778457: Missing error logs(afr/self-heald )
                                                      • #1779055: glusterfs process memory leak in ior test
                                                      • #1779089: glusterfsd do not release posix lock when multiple glusterfs client do flock -xo to the same file paralleled
                                                      • #1779742: tests/00-geo-rep/00-georep-verify-non-root-setup.t fail on freshly installed builder
                                                      • #1779760: Improve logging in EC, client and lock xlator
                                                      • #1780190: glfsheal should be installed and invoked as architecture-dependent binary helper
                                                      • #1780260: v7 fails to build on Debian 9 [patch?]
                                                      • #1781440: event_slot_alloc not able to return index after reach slot_used count to 1024
                                                      • #1782200: glusterd restart failing to start.
                                                      • #1782495: GlusterFS brick process crash
                                                      • #1784375: 'gluster volume set disable.nfs' accidentally killed unexpected process, and forced a data brick offline.
                                                      • #1785143: Multiple glusterfsd process spawn when glusterd restart during a volume start.
                                                      • #1785208: glusterfs client process coredump
• #1785611: glusterfsd crashes after a few seconds
                                                      • #1785998: change the error message for heal statistics to reflect its supportability for disperse volume
                                                      • #1786276: [geo-rep] Help for positional argument SLAVE in schedule_georep.py.in isn't clear.
                                                      • #1786459: unable to enable brick-multiplex feature
                                                      • #1786478: default option is disappeared in volume info after volume reset
                                                      • #1786679: Duplicate entries in 'ls' output after a volume expansion
                                                      • #1786722: Functionality to enable log rotation for user serviceable snapshot's logs.
                                                      • #1787122: glusterd allowing to set server.statedump-path to file, non-existent file and non-existent paths
                                                      • #1787274: heal not actually healing metadata of a regular file when only time stamps are changed(data heal not required)
                                                      • #1787554: Unable to set/modify optimistic-change-log for replicate volumes
                                                      • #1789439: Glusterfind pre command fails
                                                      • #1789478: S57glusterfind-delete-post.py not python3 ready (does not decode bytestring)
                                                      • #1790748: Remove extra argument
                                                      • #1790870: Memory corruption when sending events to an IPv6 host
                                                      • #1791682: fail to build on recent Fedora
                                                      • #1792276: config ssh-port can accept negative and outside allowed port range value
                                                      • #1792707: xlators/features/quota/src/quota.c:quota_log_usage
                                                      • #1793378: dht_hash_compute() crashes when it receives a zero length name
                                                      • #1793852: Mounts fails after reboot of 1/3 gluster nodes
                                                      • #1793995: gluster crash when built without gNFS support
                                                      • #1797869: bitrot: Number of signing process threads should be configurable.
                                                      • #1797882: Segmentation fault occurs while truncate file
                                                      • #1797934: Client should propagate ping event from brick
                                                      • #1800583: Halo replication is not working
                                                      • #1800956: Rebalance : Status lists failures on stopping rebalance while it is in progress
                                                      • #1801623: spurious self-heald.t failure
                                                      • #1801624: Heal pending on volume, even after all the bricks are up
                                                      • #1801684: Memory corruption when glfs_init() is called after glfs_fini()
• #1804786: mount.glusterfs strips off "/" from subdir-mounts
                                                      • #1808421: WORM: If autocommit-period 0 file will be WORMed with 0 Byte during initial write
                                                      • #1808875: [brickmux]: glustershd crashed when rebooting 1/3 nodes at regular intervals
                                                      • #1810042: Changes to gluster peer probe in nightly build breaks ansible:gluster_volume call
                                                      • #1810842: frequent heal observed when file opened during one brick is down
                                                      • #1810934: Segfault in FUSE process, potential use after free
                                                      • #1811631: brick crashed when creating and deleting volumes over time (with brick mux enabled only)
                                                      • #1812144: Add a warning message during volume expansion or resize on volume with snapshots
                                                      • #1812353: create-export-ganesha script: mention labelled nfs parameter
                                                      • #154 Optimized CHANGELOG
                                                      • #237 Validate other memory allocation implementations instead of l...
                                                      • #475 Reduce the number or threads used in the brick process
                                                      • #613 Mandatory locks enhancements in replicate subvolumes
                                                      • #657 Structured logging format support
                                                      • #663 Add Ganesha HA bits back to glusterfs code repo
                                                      • #687 Thin-arbiter integration with GD1
                                                      • #699 executable program will crash if linked libgfapi together wit...
                                                      • #703 provide mechanism to test individual xlators
                                                      • #721 Introduce quorum-count option in disperse volumes as well
                                                      • #723 Provide scripts to reset xattrs of the entries which could be...
                                                      • #725 Disperse: A way to read from specific bricks
                                                      • #741 Client Handling of Elastic Clusters
                                                      • #745 storage.reserve enhancement for posix_write
                                                      • #748 Improve MAKE_HANDLE_GFID_PATH macro and posix_handle_gfid_path()
                                                      • #753 Remove fetching items in gf_cli_replace_brick(), gf_cli_reset...
                                                      • #755 [RFE] Geo-replication code improvements
                                                      • #761 Improve MAKE_HANDLE_PATH macro
                                                      • #763 thin-arbiter: Testing report
                                                      • #765 nfs.rpc-auth-allow gluster7 + gnfs
                                                      • #788 run-with-valgrind option causes gnfs and quota to fail to start
                                                      • #824 Migrate bugzilla workflow to github issues workflow
                                                      • #832 Permission Denied in logs
                                                      • #884 [bug:1808688] Data corruption with asynchronous writes (pleas...
                                                      • #891 [bug:1802451] Optimize posix code to improve file creation
                                                      • #977 [bug:1811631] brick crashed when creating and deleting volume...
                                                      • #999 [bug:1791285] Changing permissions on root directory(director...
                                                      • #1000 [bug:1193929] GlusterFS can be improved
                                                      • #1038 [bug:1787138] Crash on rpcsvc_drc_client_unref() - fails on G...
                                                      • #1042 [bug:1806499] afr-lock-heal-basic.t and /afr-lock-heal-advanc...
                                                      • #1044 [bug:1790730] Add a basic test file to glusterfind
                                                      • #1052 [bug:1693692] Increase code coverage from regression tests
                                                      • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                                                      • #1067 [bug:1661889] Metadata heal picks different brick each time a...
                                                      • #1097 [bug:1635688] Keep only the valid (maintained/supported) comp...
                                                      • #1102 dht: gf_defrag_process_dir is called even if gf_defrag_fix_la...
                                                      • #1104 geo-replication: descriptive message when worker crashes due ...
                                                      • #1105 [bug:1794263] Multiple imports from the same library in the ....
                                                      • #1127 Mount crash during background shard cleanup
• #1134 snap_scheduler.py init failing with "TypeError: Can't mix str...
                                                      • #1140 getfattr returns ENOATTR for system.posix_acl_access on dispe...
                                                      • #1141 Make SSL connection messages useful
                                                      • #1142 log the ENOENT error in posix_pstat
                                                      • #1144 [Disperse] Add test for reset-brick for disperse volume
                                                      • #1146 gfapi/Upcall: Potential deadlock in synctask threads processi...
                                                      • #1149 Add error logs to debug failures in ./tests/bugs/protocol/bug...
                                                      • #1150 Avoid dict_del logs in posix_is_layout_stale while key is NULL
                                                      • #1152 Spurious failure of tests/bugs/protocol/bug-1433815-auth-allow.t
                                                      • #1153 Spurious failure of ./tests/bugs/snapshot/bug-1111041.t
                                                      • #1154 failing test cases
                                                      • #1156 Spurious failure of tests/features/worm.t
                                                      • #1158 spurious failure of tests/bugs/glusterd/serialize-shd-manager...
                                                      • #1160 sys_stat should be used instead of stat
                                                      • #1161 tests: file offsets and sizes shouldn't be truncated to 32-bi...
                                                      • #1162 spurious failure of test case tests/bugs/glusterd/removing-mu...
• #1169 common-ha: cluster status shows "FAILOVER" even when all node...
                                                      • #1180 (glusterfs-8.0) - GlusterFS 8.0 tracker
                                                      • #1179 gnfs split brain when 1 server in 3x1 down (high load)
                                                      • #1220 cluster/ec: return correct error code and log the message in case of BADFD
                                                      • #1223 Failure of tests/basic/gfapi/gfapi-copy-file-range.t
                                                      • #1116 [bug:1790736] gluster volume list returning wrong volume list / volume list time out
                                                      • #990 [bug:1578405] EIO errors when updating and deleting entries co...
                                                      • #1126 packaging: overhaul glusterfs.spec(.in) to align with SUSE and...
                                                      • #1225 fuse causes glusterd to dump core
                                                      • #1243 Modify and return iatt (especially size and block-count) in sh...
                                                      • #1254 Prioritize ENOSPC over other lesser priority errors
                                                      • #1303 Failures in rebalance due to [Input/output error]
                                                      • "},{"location":"release-notes/8.1/","title":"Release notes for Gluster 8.1","text":"

                                                        Release date: 27-Aug-2020

This is an improvements and bugfix release. The release notes for 8.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

                                                        NOTE: Next minor release tentative date: Week of 20th Sep, 2020

                                                        "},{"location":"release-notes/8.1/#improvements-and-highlights","title":"Improvements and Highlights","text":"

The following improvements have been added in this minor release.

• Improved performance when creating large files (for example, VM disks in oVirt) by reducing trivial lookups of non-existent shards. Issue (#1425)
• Fsync in the replication module now uses the eager-lock functionality, improving write-heavy VM workloads by more than 50% for small (approximately 4 KB) block sizes; see the example below. Issue (#1253)
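The eager-lock behaviour referenced above is controlled through a regular volume option. The commands below are a minimal sketch of how one might inspect or toggle it with the gluster CLI; <VOLNAME> is a placeholder for a replicated volume, and eager-lock is on by default, so the set command is only needed if it was previously disabled.

    # show the current eager-lock setting for a replicated volume
    gluster volume get <VOLNAME> cluster.eager-lock

    # re-enable eager-lock explicitly if it had been turned off
    gluster volume set <VOLNAME> cluster.eager-lock on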
                                                        "},{"location":"release-notes/8.1/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/8/8.1/

                                                        "},{"location":"release-notes/8.1/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"

The list of issues addressed in this release is given below.

                                                        • #763 thin-arbiter: Testing report
• #1217 Modify group "virt" to add rpc/network related changes
                                                        • #1250 geo-rep: Fix corner case in rename on mkdir during hybrid crawl
                                                        • #1281 Unlinking the file with open fd, returns ENOENT or stale file ...
                                                        • #1348 Fuse mount crashes in shard translator when truncating a *real...
                                                        • #1351 issue with gf_fill_iatt_for_dirent()
                                                        • #1352 api: libgfapi symbol versions break LTO in Fedora rawhide/f33
                                                        • #1354 High CPU utilization by self-heal on disperse volumes with no ...
                                                        • #1385 High CPU utilization by self-heal on disperse volumes when an ...
                                                        • #1396 [bug-1851989] smallfile performance drops after commit the pat...
                                                        • #1407 glusterd keep crashing when upgrading from 6.5 to 7.7
                                                        • #1418 GlusterFS 8.0: Intermittent error:1408F10B:SSL routines:SSL3_G...
                                                        • #1440 glusterfs 7.7 fuse client memory leak
                                                        "},{"location":"release-notes/8.2/","title":"Release notes for Gluster 8.2","text":"

                                                        Release date: 23-Sept-2020

This is an improvements and bugfix release. The release notes for 8.0 and 8.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

                                                        NOTE: Next minor release tentative date: Week of 20th Oct, 2020

                                                        "},{"location":"release-notes/8.2/#improvements-and-highlights","title":"Improvements and Highlights","text":"

The following improvements have been added in this minor release.

• Glustereventsd now also accepts IPv6 packets (see the example below). Issue (#1377)
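As a quick way to exercise the IPv6 support mentioned above, the events CLI that ships alongside glustereventsd can register a webhook reachable over an IPv6 address. This is a minimal sketch; the bracketed IPv6 URL is purely illustrative.

    # check that glustereventsd is running on all peers
    gluster-eventsapi status

    # register a listener reachable via an IPv6 address (example URL)
    gluster-eventsapi webhook-add http://[2001:db8::10]:9000/listen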
                                                        "},{"location":"release-notes/8.2/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/8/8.2/

                                                        "},{"location":"release-notes/8.2/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"

The list of issues addressed in this release is given below.

                                                        • #1000 [bug:1193929] GlusterFS can be improved
                                                        • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                                                        • #1332 Unable to Upgrade to Gluster 7 from Earlier Version
                                                        • #1440 glusterfs 7.7 fuse client memory leak
                                                        • #1472 Readdir-ahead leads to inconsistent ls results
                                                        "},{"location":"release-notes/8.3/","title":"Release notes for Gluster 8.3","text":"

                                                        Release date: 23-Dec-2020

                                                        This is a bugfix release. The release notes for 8.0, 8.1 and 8.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

                                                        NOTE:

                                                        • Next minor release tentative date: Week of 20th Feb, 2021
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/8.3/#highlights-of-release","title":"Highlights of Release","text":"

This release mainly contains bug fixes, as described in the issues section below.

                                                        "},{"location":"release-notes/8.3/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/8/8.3/

                                                        "},{"location":"release-notes/8.3/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"

The list of issues addressed in this release is given below.

                                                        • #1836 posix: Update ret value in posix_get_gfid2path if GF_MALLOC fails
                                                        • #1796 afr: call afr_is_lock_mode_mandatory only while xdata is valid
                                                        • #1778 volume set: failed: ganesha.enable is already 'off'.
                                                        • #1738 [cli] Improper error message on command timeout
                                                        • #1699 One brick offline with signal received: 11
                                                        • #1663 test case ./tests/bugs/core/bug-1650403.t is getting timed out
                                                        • #1601 rfc.sh on release-8 needs to move to github flow
                                                        • #1499 why not use JumpConsistentHash to replace SuperFastHash to cho...
• #1438 syncdaemon/syncdutils.py: SyntaxWarning: "is" with a literal. ...
                                                        • #1221 features/bit-rot: invalid snprintf() buffer size
                                                        • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                                                        • #1002 [bug:1679998] GlusterFS can be improved
                                                        • #1000 [bug:1193929] GlusterFS can be improved
                                                        "},{"location":"release-notes/8.4/","title":"Release notes for Gluster 8.4","text":"

                                                        Release date: 01-Mar-2021

                                                        This is a bugfix release. The release notes for 8.0, 8.1, 8.2 and 8.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

                                                        NOTE:

                                                        • Next minor release tentative date: Week of 20th Apr, 2021
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/8.4/#highlights-of-release","title":"Highlights of Release","text":"
• Data is now healed in 1 MB chunks instead of 128 KB, improving healing performance (see the example below) #2067
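To observe the effect of the larger heal chunk size, heal progress can be monitored from the CLI while a heal is running. This is a minimal sketch, with <VOLNAME> as a placeholder for the volume being healed.

    # summary of entries still pending heal, per brick
    gluster volume heal <VOLNAME> info summary

    # count of entries pending heal, per brick
    gluster volume heal <VOLNAME> statistics heal-count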
                                                        "},{"location":"release-notes/8.4/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/8/8.4/

                                                        "},{"location":"release-notes/8.4/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"

The list of issues addressed in this release is given below.

• #2154 "Operation not supported" doing a chmod on a symlink
• #2107 mount crashes when setfattr -n distribute.fix.layout -v "yes"...
                                                        • #1991 mdcache: bug causes getxattr() to report ENODATA when fetchin...
                                                        • #1925 dht_pt_getxattr does not seem to handle virtual xattrs.
                                                        • #1539 fuse mount crashes on graph-switch when reader-thread-count i...
                                                        • #1529 Fix regression in on demand migration feature
                                                        • #1406 shared storage volume fails to mount in ipv6 environment
                                                        "},{"location":"release-notes/8.5/","title":"Release notes for Gluster 8.5","text":"

                                                        Release date: 17-May-2021

                                                        This is a bugfix release. The release notes for 8.0, 8.1, 8.2, 8.3 and 8.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

                                                        NOTE:

                                                        • Next minor release tentative date: Week of 30th Jun, 2021
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/8.5/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                                                        • Slow write on ZFS bricks after healing millions of files due to adding arbiter brick #1764
                                                        • 4+1 arbiter setup is broken #2192
                                                        "},{"location":"release-notes/8.5/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/8/8.5/

                                                        "},{"location":"release-notes/8.5/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                                                        • #1214 Running tests/basic/afr/inodelk.t on my VM crashes in dht
                                                        • #1324 Inconsistent custom xattr on backend directories after bringing bac
                                                        • #1764 Slow write on ZFS bricks after healing millions of files due to add
                                                        • #2161 Crash caused by memory corruption
                                                        • #2192 4+1 arbiter setup is broken
                                                        • #2198 There are blocked inodelks for a long time
                                                        • #2210 glusterfsd memory leak observed when constantly running volume heal
                                                        • #2234 Segmentation fault in directory quota daemon for replicated volume
                                                        • #2253 Disable lookup-optimize by default in the virt group
                                                        • #2313 Long setting names mess up the columns and break parsing
                                                        "},{"location":"release-notes/8.6/","title":"Release notes for Gluster 8.6","text":"

                                                        Release date: 30-Aug-2021

                                                        This is a bugfix release. The release notes for 8.0, 8.1, 8.2, 8.3, 8.4 and 8.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 8 stable release.

                                                        NOTE:

• This is the last release of the Gluster 8 series
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/8.6/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
• Improved handling of gfid mismatches in geo-replication: Geo-replication gets delayed when there are many renames on the primary volume (https://github.com/gluster/glusterfs/issues/2388); a status-check example follows this list
• Resolved core dumps on Gluster 9 with 3 replicas: Initialize list head to prevent NULL de-reference (https://github.com/gluster/glusterfs/issues/2443)
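For users affected by the geo-replication fix above, session health can be checked from the primary side after upgrading. The command below is a minimal sketch; the volume and host names are placeholders.

    # show the state (Active/Passive/Faulty) and sync details of each geo-rep worker
    gluster volume geo-replication <PRIMARY-VOL> <SECONDARY-HOST>::<SECONDARY-VOL> status detail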
                                                        "},{"location":"release-notes/8.6/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/8/8.6/

                                                        "},{"location":"release-notes/8.6/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                                                        • #2388 Geo-replication gets delayed when there are many renames on primary volume
                                                        • #2689 glusterd: reset mgmt_v3_lock_timeout after it be used
                                                        • #1000 GlusterFS can be improved: fix getcwd usage warning
                                                        • #2394 Spurious failure in tests/basic/fencing/afr-lock-heal-basic.t
                                                        • #2691 georep-upgrade.t find failures
                                                        • #154 Optimized CHANGELOG: upgrade script for geo-rep
                                                        • #2443 Core dumps on Gluster 9 - 3 replicas: Initialize list head to prevent NULL de-reference
                                                        • #2404 Spurious failure of tests/bugs/ec/bug-1236065.t
                                                        "},{"location":"release-notes/9.0/","title":"Release notes for Gluster 9.0","text":"

                                                        Release date: 05-Feb-2021

                                                        This is a major release that includes a range of features, code improvements and stability fixes as noted below.

                                                        A selection of the key features and changes are documented in this page. A full list of bugs that have been addressed is included further below.

                                                        • Release notes for Gluster 9.0
                                                        • Announcements
                                                        • Major changes and features
                                                          • Highlights
                                                          • Features
                                                        • Major issues
                                                        • Bugs addressed
                                                        "},{"location":"release-notes/9.0/#announcements","title":"Announcements","text":"
1. The release that continues to receive maintenance updates after release 9 is release 8 (reference)

2. Release 9 will receive maintenance updates around the 30th of every month for the first 3 months after release (i.e. Mar'21, Apr'21, May'21). After the initial 3 months, it will receive maintenance updates every 2 months until EOL.

                                                        "},{"location":"release-notes/9.0/#major-changes-and-features","title":"Major changes and features","text":""},{"location":"release-notes/9.0/#highlights","title":"Highlights","text":"

                                                        Added support for:

• io_uring in Gluster (requires io_uring support in the kernel, along with the liburing library and headers)
• running with up to 5000 volumes (tested with 5k volumes on 3 nodes, with brick multiplexing enabled in its default configuration); see the brick-multiplexing example below
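The 5000-volume testing noted above relied on brick multiplexing, which is configured as a cluster-wide option. The commands below are a minimal sketch of enabling it, together with the related cap on bricks per process; the value 250 is only an illustrative choice.

    # enable brick multiplexing cluster-wide
    gluster volume set all cluster.brick-multiplex on

    # optionally cap how many bricks share one glusterfsd process
    gluster volume set all cluster.max-bricks-per-process 250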
                                                        "},{"location":"release-notes/9.0/#features","title":"Features","text":"
• Added io_uring support for Gluster #1398 (a volume-option example follows this list)
                                                        • Added Support for 5K volumes #1613
                                                        • Enabled granular-entry-heal by default #1483
                                                        • Optimizations for rename dir heal #1211
                                                        • Added support for monitoring the epoll/rpc layer #1466
• Brick mux: Added support to spawn threads on a per-process basis instead of per brick #1482
                                                        • Improve rebalance of sparse files #1222
                                                        • LTO/GCC10 - Gluster is now compiled with LTO enabled by default #1772
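Two of the features above are driven by volume options that can be verified or toggled from the CLI. The snippet below is a minimal sketch: cluster.granular-entry-heal and the heal CLI sub-command are existing interfaces, while the storage.linux-io_uring option name is an assumption made here for the io_uring support and should be checked against the installed version.

    # granular entry heal (enabled by default for new replica volumes in release 9)
    gluster volume get <VOLNAME> cluster.granular-entry-heal
    gluster volume heal <VOLNAME> granular-entry-heal enable

    # io_uring support in the posix layer (option name assumed; requires kernel io_uring and liburing)
    gluster volume set <VOLNAME> storage.linux-io_uring on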
                                                        "},{"location":"release-notes/9.0/#major-issues","title":"Major issues","text":"

                                                        None

                                                        "},{"location":"release-notes/9.0/#bugs-addressed","title":"Bugs addressed","text":"

                                                        Bugs addressed since release-8 are listed below.

• #718 _store_global_opts(), _storeslaves(), _storeopts() should no...
                                                        • #280 Use internal error codes instead of UNIX errnos
                                                        • #1855 Makefile: failed to compile without git repository
                                                        • #1849 geo-rep: The newly setup geo-rep session goes faulty with syn...
                                                        • #1836 posix: Update ret value in posix_get_gfid2path if GF_MALLOC f...
                                                        • #1810 Implement option to generate core dump at will without killin...
                                                        • #1796 afr: call afr_is_lock_mode_mandatory only while xdata is valid
                                                        • #1794 posix: A brick process is getting crashed at the time of grap...
                                                        • #1782 Rebalance is reporting status twice upon stopping, resulting ...
                                                        • #1778 volume set: failed: ganesha.enable is already 'off'.
                                                        • #1775 core: lru_size showing -1 with zero inodes in the list in the...
                                                        • #1772 build: add LTO as a configure option
                                                        • #1743 Modify format to contain more information while raising glust...
                                                        • #1739 test case ./tests/basic/afr/entry-self-heal-anon-dir-off.t is...
                                                        • #1738 [cli] Improper error message on command timeout
                                                        • #1733 api: conscious language changes
                                                        • #1713 Conscious language changes in various xlators
                                                        • #1699 One brick offline with signal received: 11
                                                        • #1692 Test tests/basic/0symbol-check.t should exclude more contrib/...
                                                        • #1663 test case ./tests/bugs/core/bug-1650403.t is getting timed out
                                                        • #1661 test case ./tests/bugs/bug-1064147.t is continuously failing
                                                        • #1659 wrong comparison in glusterd_brick_start() function
                                                        • #1654 Rebalance/migration per directory/file
                                                        • #1653 io-cache xlators lock/unlock are always accompanied by gf_msg...
                                                        • #1627 Stopping rebalance results in a failure
                                                        • #1613 glusterd[brick_mux]: Optimize friend handshake code to avoid ...
                                                        • #1594 ./tests/00-geo-rep/00-georep-verify-non-root-setup.t fails on...
                                                        • #1587 geo-rep: Enable rsync verbose logging to help debug rsync errors
                                                        • #1584 MAINTAINERS file needs to be revisited and updated
                                                        • #1582 ./rfc.sh doesn't pick upstream correctly
                                                        • #1577 cli-rpc: Call to global quota rpc init even though operation ...
                                                        • #1569 Introduce a compile time --enable-brickmux option to run bric...
                                                        • #1565 Implement pass-through option for write-behind
                                                        • #1550 MAINTAINERS list of DHT needs to be updated
                                                        • #154 Optimized CHANGELOG
                                                        • #1546 Wrong permissions syned to remote brick when using halo repli...
                                                        • #1545 fuse_invalidate_entry() - too many repetitive calls to uuid_u...
                                                        • #1544 file tree memory layout optimization
                                                        • #1543 trash: Create inode_table only while feature is enabled
                                                        • #1542 io-stats: Configure ios_sample_buf_size based on sample_inter...
                                                        • #1541 Geo-rep: some files(hardlinks) are missing in slave after mas...
                                                        • #1540 [RFE] Rebalance: suppurt migration to files with hardlinks (n...
                                                        • #1539 fuse mount crashes on graph-switch when reader-thread-count i...
                                                        • #1538 Need to configure optimum inode table hash_size for shd
                                                        • #1529 Fix regression in on demand migration feature
                                                        • #1526 Brick status is 'stopped' if socket file is absent but brick ...
                                                        • #1518 glusterfs: write operations fail when the size is equal or gr...
                                                        • #1516 Use of strchr glusterd_replace_slash_with_hyphen
                                                        • #1511 Crash due to memory allocation
                                                        • #1508 Add-brick with Increasing replica count fails with bad brick ...
                                                        • #1507 Time-to-completion mechansim in rebalance is broken
                                                        • #1506 tests/000-flaky/bugs_nfs_bug-1116503.t is crashed in in gf_me...
                                                        • #1499 why not use JumpConsistentHash to replace SuperFastHash to ch...
                                                        • #1497 Removing strlen and using the already existing len of data_t
                                                        • #1487 Quota accounting check script fails with UnicodeDecodeError
                                                        • #1483 Enable granular-entry-heal by default
                                                        • #1482 [Brick-mux] Attach several posix threads with glusterfs_ctx
                                                        • #1480 First letter in mount path of bricks are getting truncated fo...
                                                        • #1477 nfs server crashes in acl3svc_init
                                                        • #1476 Changes required at Snaphot as gluster-shared-storage mount p...
                                                        • #1475 gluster_shared_storage failed to automount on node reboot on ...
                                                        • #1472 Readdir-ahead leads to inconsistent ls results
                                                        • #1466 RPC handling latencies should be printed in statedump
                                                        • #1464 Avoid dict OR key (link-count) is NULL [Invalid argument] mes...
                                                        • #1459 gluster_shared_storage failed to automount on node reboot on ...
                                                        • #1453 Disperse shd heal activity should be observable
                                                        • #1442 Remove Glusterfs SELinux module from Distribution's selinux-p...
                                                        • #1440 glusterfs 7.7 fuse client memory leak
• #1438 syncdaemon/syncdutils.py: SyntaxWarning: "is" with a literal....
                                                        • #1434 Inform failures while fop failed in disperse volume due to so...
                                                        • #1428 Redundant check in dict_get_with_refn()
                                                        • #1427 Bricks failed to restart after a power failure
                                                        • #1425 optimization over shard lookup in case of prealloc
                                                        • #1422 Rebalance - new volume option to turn on/off optimization in ...
                                                        • #1418 GlusterFS 8.0: Intermittent error:1408F10B:SSL routines:SSL3_...
                                                        • #1416 Dependencies of performance.parallel-readdir should be automa...
                                                        • #1410 01-georep-glusterd-tests.t times out on centos7 builders
                                                        • #1407 glusterd keep crashing when upgrading from 6.5 to 7.7
                                                        • #1406 shared storage volume fails to mount in ipv6 environment
                                                        • #1404 Client side split-brain resolution using favourite-child-poli...
                                                        • #1403 Tests failure on C8: ./tests/features/ssl-ciphers.t
                                                        • #1401 quota_fsck.py throws TypeError
                                                        • #1400 Annotate synctasks with tsan API if --enable-tsan is requested
                                                        • #1399 Add xlator identifiers in statedumps for mem-pools
                                                        • #1398 io_uring support in glusterfs main branch
                                                        • #1397 glusterd_check_brick_order() is needlessly fetching volname, ...
                                                        • #1396 [bug-1851989] smallfile performance drops after commit the pa...
                                                        • #1395 optimize dict_serialized_length_lk function
                                                        • #1391 allow add-brick from nodes which are not part of auth.allow list
                                                        • #1385 High CPU utilization by self-heal on disperse volumes when an...
                                                        • #1383 Remove contrib/sunrpc/xdr_sizeof.c
                                                        • #1381 Optional FUSE notitications
                                                        • #1379 Fix NULL pointer
                                                        • #1378 Use better terminology and wording in the code
                                                        • #1377 Glustereventsd to accept not only IPv4 but IPv6 packets too.
                                                        • #1376 Runtime & Build Fixes for FreeBSD
                                                        • #1375 cluster: mount.glusterfs is stuck when trying to mount unknow...
                                                        • #1374 fuse interrupt issues identified in code review
                                                        • #1371 [RHEL 8.1] [Input/output error] observed in remove-brick oper...
                                                        • #1366 geo-replication session fails to start with IPV6
                                                        • #1361 Screen .attribute directories on NetBSD
                                                        • #1359 Cleanup --disable-mempool
                                                        • #1357 options should display not only current values but also defau...
                                                        • #1356 cli: type mismatch global_quotad_rpc in cli-quotad-client.c
                                                        • #1355 Heal info desn't show split-brain info if halo is enabled
                                                        • #1354 High CPU utilization by self-heal on disperse volumes with no...
                                                        • #1353 errors seen with gluster v get all all
                                                        • #1352 api: libgfapi symbol versions break LTO in Fedora rawhide/f33
                                                        • #1351 issue with gf_fill_iatt_for_dirent()
                                                        • #1350 Simplify directory scanning
                                                        • #1348 Fuse mount crashes in shard translator when truncating a *rea...
                                                        • #1347 NetBSD build fixes
                                                        • #1339 Rebalance status is not shown correctly after node reboot
                                                        • #1332 Unable to Upgrade to Gluster 7 from Earlier Version
                                                        • #1329 Move platform-dependent filesystem sync to a library function
                                                        • #1328 Linux kernel untar failed with errors immediate after add-brick
                                                        • #1327 Missing directory is not healed in dht
                                                        • #1324 Inconsistent custom xattr on backend directories after bringi...
                                                        • #1320 Unified support for building with sanitizers
                                                        • #1311 Data race when handling connection status
                                                        • #1310 tests/features/flock_interrupt.t leads to error logs
                                                        • #1306 add-brick command is failing
                                                        • #1303 Failures in rebalance due to [Input/output error]
                                                        • #1302 always print errno (and use English locale for strerror() out...
                                                        • #1291 Free volume info lock and mutex
                                                        • #1290 Test case brick-mux-validation-in-cluster.t is failing on RHEL-8
                                                        • #1289 glustereventsd log file isn't reopened after rotation
                                                        • #1285 Use-after-destroy mutex error
                                                        • #1283 Undefined behavior in __builtin_ctz
• #1282 New file created with xattr "trusted.glusterfs.dht"
                                                        • #1281 Unlinking the file with open fd, returns ENOENT or stale file...
                                                        • #1279 Fix several signed integer overflows
                                                        • #1278 Fix memory leak in afr_priv_destroy()
                                                        • #1275 Make glusterfs compile on all recent and supported versions o...
                                                        • #1272 tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glust...
                                                        • #1269 common-ha: ganesha-ha.sh bad test for {rhel,centos} for pcs o...
                                                        • #1263 Fix memory leak in glusterd_store_retrieve_bricks()
                                                        • #1260 Implement minimal proper synchronization for gf_attach
                                                        • #1259 Fix memory leak in gf_cli_gsync_status_output()
                                                        • #1258 dht: Add null check
                                                        • #1255 Improve snapshot clone error message
                                                        • #1254 Prioritize ENOSPC over other lesser priority errors
                                                        • #1253 On Ovirt setup glusterfs performs poorly
                                                        • #1250 geo-rep: Fix corner case in rename on mkdir during hybrid crawl
                                                        • #1249 Drop 'const' type qualifier on return type
                                                        • #1248 Fix thread naming and related convention
                                                        • #1245 Spurious failures in ./tests/basic/ec/ec-quorum-count.t
                                                        • #1243 Modify and return iatt (especially size and block-count) in s...
                                                        • #1242 Rebalance - Improve Crawl time in rebalance
                                                        • #1240 tests/basic/afr/gfid-mismatch-resolution-with-fav-child-polic...
                                                        • #1236 glusterfs-geo-replication requires policycoreutils-python-uti...
                                                        • #1234 Fix ./tests/basic/fencing/afr-lock-heal-basic.t failure
                                                        • #1230 core dumped executing tests/line-coverage/errorgen-coverage.t
                                                        • #1228 seek functionalty is broken
                                                        • #1226 Gluster webhook update throws error
                                                        • #1225 fuse causes glusterd to dump core
                                                        • #1223 Failure of tests/basic/gfapi/gfapi-copy-file-range.t
                                                        • #1222 [RFE] Improve rebalance of sparse files
                                                        • #1221 features/bit-rot: invalid snprintf() buffer size
                                                        • #1220 cluster/ec: return correct error code and log the message in ...
                                                        • #1218 dht: Do opendir selectively in gf_defrag_process_dir
• #1217 Modify group "virt" to add rpc/network related changes
                                                        • #1214 Running tests/basic/afr/inodelk.t on my VM crashes in dht
                                                        • #1211 AFR: Rename dir heal shouldn't delete the directory at oldloc...
                                                        • #1209 tests: georep-upgrade.t test failure
                                                        • #1208 warning: initializer overrides prior initialization of this s...
                                                        • #1207 warning: passing an object that undergoes default argument pr...
                                                        • #1204 GD_OP_VERSION needs to be updated
                                                        • #1202 Issues reported by Coverity static analysis tool
                                                        • #1200 Handle setxattr and rm race for directory in rebalance
                                                        • #1197 Geo-replication tests are spuriously failing in CI
                                                        • #1196 glusterfsd is having a leak while only mgmt SSL is enabled
                                                        • #1193 Scheduling of snapshot for a volume is failing to create snap...
                                                        • #1190 spurious failure of tests/basic/quick-read-with-upcall.t
                                                        • #1187 Failures in rebalance due to [No space left on device] error ...
                                                        • #1182 geo-rep requires relevant selinux permission for rsync
                                                        • #1179 gnfs split brain when 1 server in 3x1 down (high load)
                                                        • #1172 core, cli, quota: cleanup malloc debugging and stats
• #1169 common-ha: cluster status shows "FAILOVER" even when all node...
                                                        • #1164 migrate remove-brick operation to mgmt v3 frameowrk
                                                        • #1154 failing test cases
                                                        • #1135 Fix @sysconfdir@ expansion in extras/systemd/glusterd.service...
                                                        • #1126 packaging: overhaul glusterfs.spec(.in) to align with SUSE an...
                                                        • #1116 [bug:1790736] gluster volume list returning wrong volume list...
                                                        • #1101 [bug:1813029] volume brick fails to come online because other...
                                                        • #1097 [bug:1635688] Keep only the valid (maintained/supported) comp...
                                                        • #1096 [bug:1622665] clang-scan report: glusterfs issues
                                                        • #1075 [bug:1299203] resolve-gids is not needed for Linux kernels v3...
                                                        • #1072 [bug:1251614] gf_defrag_fix_layout recursively fails, distrac...
                                                        • #1060 [bug:789278] Issues reported by Coverity static analysis tool
                                                        • #1052 [bug:1693692] Increase code coverage from regression tests
                                                        • #1050 [bug:1787325] TLS/SSL access of GlusterFS mounts is slower th...
                                                        • #1047 [bug:1774379] check for same hostnames(bricks from same host/...
                                                        • #1043 [bug:1793490] snapshot clone volume is not exported via NFS-G...
                                                        • #1009 [bug:1756900] tests are failing in RHEL8 regression
                                                        • #1002 [bug:1679998] GlusterFS can be improved
                                                        • #1000 [bug:1193929] GlusterFS can be improved
                                                        • #990 [bug:1578405] EIO errors when updating and deleting entries c...
                                                        • #952 [bug:1589705] quick-read: separate performance.cache-size tun...
                                                        • #876 [bug:1797099] After upgrade from gluster 7.0 to 7.2 posix-acl...
                                                        • #874 [bug:1793390] Pre-validation failure does not provide any hin...
                                                        • #837 Indicate timezone offset in formatted timestamps
                                                        • #829 gfapi: Using ssl and glfs_set_volfile together does not work
                                                        • #827 undefined symbol: xlator_api
                                                        • #824 Migrate bugzilla workflow to github issues workflow
                                                        • #816 RFE: Data/MetaData separator Translator
                                                        • #790 infinite loop in common-utils.c - gf_rev_dns_lookup_cache() ?
                                                        • #763 thin-arbiter: Testing report
                                                        "},{"location":"release-notes/9.1/","title":"Release notes for Gluster 9.1","text":"

                                                        Release date: 05-Apr-2021

                                                        This is a bugfix and improvement release. The release notes for 9.0 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

                                                        NOTE:

                                                        • Next minor release tentative date: Week of 30th Apr, 2021
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/9.1/#highlights-of-release","title":"Highlights of Release","text":"
• Provide an autoconf option to enable/disable storage.linux-io_uring during compilation #2063 (see the sketch after this list)
                                                        • Healing data in 1MB chunks instead of 128KB for improving healing performance #2067
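
A rough illustration of how the io_uring knob surfaces at build time and run time. The exact configure flag spelling and the volume-set usage below are assumptions inferred from issues #2063/#1398 rather than text from these notes, and "myvol" is a placeholder volume name:

# Build time: compile without io_uring support (flag name assumed from the autoconf option added in #2063)
./configure --disable-linux-io_uring

# Run time: toggle the io_uring feature on an existing volume (assumed usage; a volume restart may be needed for it to take effect)
gluster volume set myvol storage.linux-io_uring on
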
                                                        "},{"location":"release-notes/9.1/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/9/9.1/

                                                        "},{"location":"release-notes/9.1/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"

                                                        Please find the list of issues added to this release below.

                                                        • #1406 shared storage volume fails to mount in ipv6 environment
                                                        • #1991 mdcache: bug causes getxattr() to report ENODATA when fetchin...
                                                        • #2063 Provide autoconf option to enable/disable storage.linux-io_ur...
                                                        • #2067 Change self-heal-window-size to 1MB by default
                                                        • #2107 mount crashes when setfattr -n distribute.fix.layout -v \"yes\"...
                                                        • #2154 \"Operation not supported\" doing a chmod on a symlink
                                                        • #2192 4+1 arbiter setup is broken
                                                        • #2198 There are blocked inodelks for a long time
                                                        • #2234 Segmentation fault in directory quota daemon for replicated v...
                                                        "},{"location":"release-notes/9.2/","title":"Release notes for Gluster 9.2","text":"

                                                        Release date: 17-May-2021

                                                        This is a bugfix and improvement release. The release notes for 9.0, 9.1 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

                                                        NOTE:

                                                        • Next minor release tentative date: Week of 30th Jun, 2021
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/9.2/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                                                        • After upgrade on release 9.1 glusterd protocol is broken #2351
                                                        • Disable lookup-optimize by default in the virt group #2253
                                                        "},{"location":"release-notes/9.2/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/9/9.2/

                                                        "},{"location":"release-notes/9.2/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                                                        • #1909 core: Avoid several dict OR key is NULL message in brick logs
                                                        • #2161 Crash caused by memory corruption
                                                        • #2232 \"Invalid argument\" when reading a directory with gfapi
                                                        • #2253 Disable lookup-optimize by default in the virt group
                                                        • #2313 Long setting names mess up the columns and break parsing
                                                        • #2337 memory leak observed in lock fop
                                                        • #2351 After upgrade on release 9.1 glusterd protocol is broken
                                                        • #2353 Permission issue after upgrading to Gluster v9.1
                                                        "},{"location":"release-notes/9.3/","title":"Release notes for Gluster 9.3","text":"

                                                        Release date: 15-Jul-2021

                                                        This is a bugfix and improvement release. The release notes for 9.0, 9.1, 9.2 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

                                                        NOTE:

                                                        • Next minor release tentative date: Week of 30th Aug, 2021
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/9.3/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                                                        • Core dumps on Gluster 9 - 3 replicas #2443
                                                        • geo-rep: Improve handling of gfid mismatches #2423
                                                        • auth.allow list is corrupted after add-brick (buffer overflow?) #2524
                                                        "},{"location":"release-notes/9.3/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/9/9.3/

                                                        "},{"location":"release-notes/9.3/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                                                        • #705 gf_backtrace_save inefficiencies
                                                        • #1000 [bug:1193929] GlusterFS can be improved
                                                        • #1384 mount glusterfs volume, files larger than 64Mb only show 64Mb
                                                        • #2388 Geo-replication gets delayed when there are many renames on primary
                                                        • #2394 Spurious failure in tests/basic/fencing/afr-lock-heal-basic.t
                                                        • #2398 Bitrot and scrub process showed like unknown in the gluster volume
                                                        • #2421 rsync should not try to sync internal xattrs.
                                                        • #2440 Geo-replication not working on Ubuntu 21.04
                                                        • #2443 Core dumps on Gluster 9 - 3 replicas
                                                        • #2470 sharding: [inode.c:1255:__inode_unlink] 0-inode: dentry not found
                                                        • #2524 auth.allow list is corrupted after add-brick (buffer overflow?)
                                                        "},{"location":"release-notes/9.4/","title":"Release notes for Gluster 9.4","text":"

                                                        Release date: 14-Oct-2021

                                                        This is a bugfix and improvement release. The release notes for 9.0, 9.1, 9.2, 9.3 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

                                                        NOTE:

                                                        • Next minor release tentative date: Week of 30th Dec, 2021
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/9.4/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                                                        • Fix changelog History Crawl resume failures after stop #2133
                                                        • Fix Stack overflow when parallel-readdir is enabled #2169
                                                        • Fix rebalance crashes in dht #2239
                                                        "},{"location":"release-notes/9.4/#builds-are-available-at-","title":"Builds are available at -","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/9/9.4/

                                                        "},{"location":"release-notes/9.4/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                                                        • #2133 changelog History Crawl resume fails after stop
                                                        • #2169 Stack overflow when parallel-readdir is enabled
                                                        • #2239 rebalance crashes in dht on master
                                                        • #2625 auth.allow value is corrupted after add-brick operation
• #2649 glustershd failed in bind with error "Address already in use"
                                                        • #2659 tests/basic/afr/afr-anon-inode.t crashed
• #2754 It takes a long time to execute the "gluster volume set volumename
                                                        • #2798 FUSE mount option for localtime-logging is not exposed
                                                        • #2690 glusterd: reset mgmt_v3_lock_timeout after it be used
                                                        • #2691 georep-upgrade.t find failures
                                                        • #1101 volume brick fails to come online because other process is using port 49152
                                                        "},{"location":"release-notes/9.5/","title":"Release notes for Gluster 9.5","text":"

                                                        Release date: 1st-Feb-2022

                                                        This is a bugfix and improvement release. The release notes for 9.0, 9.1, 9.2, 9.3, 9.4 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

                                                        NOTE:

• Next minor release tentative date: Week of 20th Aug, 2022 (as published in the Gluster Community Meeting, Release 9 will now receive updates every 6 months)
                                                        • Users are highly encouraged to upgrade to newer releases of GlusterFS.
                                                        "},{"location":"release-notes/9.5/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                                                        • Fix rebalance of sparse files (https://github.com/gluster/glusterfs/issues/2317)
                                                        • Fix anomalous brick offline scenario on non rebooted node by preventing bricks from connecting to a backup volfile (https://github.com/gluster/glusterfs/issues/2480)
                                                        "},{"location":"release-notes/9.5/#builds-are-available-at-","title":"Builds are available at -","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/9/9.5/

                                                        "},{"location":"release-notes/9.5/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                                                        • #2317 Fix rebalance of sparse files
                                                        • #2414 Prefer mallinfo2() to mallinfo() if available
                                                        • #2467 Handle failure in fuse to get gids gracefully
                                                        • #2480 Prevent bricks from connecting to a backup volfile and fix brick offline scenario on non rebooted node
                                                        • #2846 Avoid redundant logs in glusterd at info level
                                                        • #2903 Fix worker disconnect due to AttributeError in geo-replication
                                                        • #2939 Remove the deprecated commands from gluster man page
                                                        "},{"location":"release-notes/9.6/","title":"Release notes for Gluster 9.6","text":"

                                                        This is a bugfix and improvement release. The release notes for 9.0, 9.1, 9.2, 9.3, 9.4, 9.5 contain a listing of all the new features that were added and bugs fixed in the GlusterFS 9 stable release.

NOTE:

• Next minor release tentative date: Week of 20th Feb, 2023
• Users are highly encouraged to upgrade to newer releases of GlusterFS.

                                                        "},{"location":"release-notes/9.6/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
• Optimize server functionality by enhancing the server_process_event_upcall code path during the handling of an upcall event
• Fix the issue of all bricks not starting on node reboot when the brick count is high (>750)
                                                        "},{"location":"release-notes/9.6/#builds-are-available-at","title":"Builds are available at","text":"

                                                        https://download.gluster.org/pub/gluster/glusterfs/9/9.6/

                                                        "},{"location":"release-notes/9.6/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                                                        • #2080 Fix inability of glustereventsd from binding to the UDP port because of selinux policies
                                                        • #2962 Fix volume create without disperse count failures with ip addresses
                                                        • #3177 Locks: Optimize the interrupt flow of POSIX locks
                                                        • #3187 Fix Locks xlator leaks fd's when a blocked posix lock is cancelled
                                                        • #3191 Fix double free issue in the cbk function dht_common_mark_mdsxattr_cbk
                                                        • #3321 Optimize server functionality by enhancing server_process_event_upcall code path during the handling of upcall event
                                                        • #3332 Fix garbage value reported by static analyser
                                                        • #3334 Fix errors and timeouts when creating qcow2 file via libgfapi
• #3375 Fix all bricks not starting issue on node reboot when brick count is high (>750)
• #3470 Fix spurious crash when "peer probing" a non existing host name
                                                        "},{"location":"release-notes/geo-rep-in-3.7/","title":"Geo rep in 3.7","text":""},{"location":"release-notes/geo-rep-in-3.7/#improved-node-fail-over-issues-handling-by-using-gluster-meta-volume","title":"Improved Node fail-over issues handling by using Gluster Meta Volume","text":"

In a replica pair, one Geo-rep worker should be active and all the other replica workers should be passive. When the active worker goes down, a passive worker becomes active. In previous releases this logic was based on node-uuid; now it is based on a lock file in the Meta Volume. Active/Passive can now be decided more accurately, and scenarios with multiple active workers are minimized.

Geo-rep also works without a Meta Volume, so this feature is backward compatible. By default the config option use_meta_volume is False. The feature can be turned on with the geo-rep config use_meta_volume true. Without it, Geo-rep works as it did in previous releases.
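
For example, assuming an existing session between a master volume "mastervol" and a slave volume "slavevol" on host "slavehost.example.com" (all placeholder names), the feature can be toggled per session:

# Switch Active/Passive election to the Meta Volume lock file
gluster volume geo-replication mastervol slavehost.example.com::slavevol config use_meta_volume true

# Fall back to the previous node-uuid based behaviour
gluster volume geo-replication mastervol slavehost.example.com::slavevol config use_meta_volume false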

                                                        Issues if meta_volume is turned off:

1. Multiple workers may become active and participate in syncing, leading to duplicated effort and all the issues that come with concurrent execution.

2. Failover only works at the node level; if a brick process goes down while the node is alive, fail-over does not happen and syncing is delayed.

3. The documented steps about brick placement in the case of replica 3 are very difficult to follow; for example, the first brick in each replica set should not be placed on the same node, etc.

4. Changelogs from a previously failed node are consumed when it comes back, which may lead to issues like delayed syncing and data inconsistencies in the case of renames.

                                                        Fixes: 1196632, 1217939

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#improved-historical-changelogs-consumption","title":"Improved Historical Changelogs consumption","text":"

Support for consuming Historical Changelogs was introduced in previous releases; with this release it is more stable and improved. Use of the filesystem crawl is minimized and limited to the initial sync. In previous releases, a node reboot or a brick process going down was treated as Changelog breakage and Geo-rep fell back to XSync for that duration. With this release, a Changelog session is considered broken only if Changelog is turned off; all other scenarios are considered safe.

                                                        This feature is also required by glusterfind.

                                                        Fixes: 1217944

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#improved-status-and-checkpoint","title":"Improved Status and Checkpoint","text":"

Status got many improvements, showing accurate details of session info, user info, the Slave node to which the Master node is connected, last synced time, etc. Initializing time is reduced; the status change happens as soon as the geo-rep workers are ready. (In previous releases the initializing time was 60 sec.)
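
The richer details appear in the regular status output; with the same placeholder session names as above, something like:

# Summary view of all workers in the session
gluster volume geo-replication mastervol slavehost.example.com::slavevol status

# Per-worker view including checkpoint and last-synced information
gluster volume geo-replication mastervol slavehost.example.com::slavevol status detail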

                                                        Fixes: 1212410

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#worker-restart-improvements","title":"Worker Restart improvements","text":"

Workers going down and coming back is very common in geo-rep, for reasons like network failure, the Slave node going down, etc. When a worker comes back up it has to reprocess the changelogs again, because it died before updating the last sync time. The batch size is now optimized so that the amount of reprocessing is minimized.

                                                        Fixes: 1210965

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#improved-rename-handling","title":"Improved RENAME handling","text":"

When a renamed filename's hash falls on another brick, that brick's changelog records the RENAME, but the rest of the fops like CREATE and DATA are recorded on the first brick. Each per-brick Geo-rep worker syncs data to the Slave Volume independently, so these operations go out of order and the Master and Slave Volumes become inconsistent. With the help of the DHT team, RENAMEs are now recorded where CREATE and DATA are recorded.

                                                        Fixes: 1141379

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#syncing-xattrs-and-acls","title":"Syncing xattrs and acls","text":"

Syncing both xattrs and ACLs to the Slave cluster is now supported. Either can be disabled by setting the config option sync-xattrs or sync-acls to false.
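
With the same placeholder session as above, either kind of syncing can be switched off per session, for example:

# Stop syncing extended attributes to the slave
gluster volume geo-replication mastervol slavehost.example.com::slavevol config sync-xattrs false

# Stop syncing ACLs to the slave
gluster volume geo-replication mastervol slavehost.example.com::slavevol config sync-acls false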

                                                        Fixes: 1187021, 1196690

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#identifying-entry-failures","title":"Identifying Entry failures","text":"

Logging improvements to identify the exact reason for entry failures, GFID conflicts, I/O errors, etc. Safe errors are not logged in the mount logs on the Slave; they are post-processed, and only genuine errors are logged in the Master logs.

                                                        Fixes: 1207115, 1210562

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#improved-rm-rf-issues-handling","title":"Improved rm -rf issues handling","text":"

Successive deletes and creates had issues; these issues are now minimized. (Not completely fixed, since it depends on open issues in DHT.)

                                                        Fixes: 1211037

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#non-root-geo-replication-simplified","title":"Non root Geo-replication simplified","text":"

Manual editing of the Glusterd vol file is simplified by introducing the gluster system:: mountbroker command.
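
A minimal sketch of the intended flow, assuming an unprivileged account "geoaccount" and a slave volume "slavevol". The account name, path and sub-arguments here are illustrative, not taken from these notes; the non-root geo-replication guide has the full procedure:

# Register the mountbroker root directory with glusterd instead of editing glusterd.vol by hand
gluster system:: execute mountbroker opt mountbroker-root /var/mountbroker-root

# Allow the unprivileged account to mount the slave volume through the mountbroker
gluster system:: execute mountbroker user geoaccount slavevol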

                                                        Fixes: 1136312

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#logging-rsync-performance-on-request-basis","title":"Logging Rsync performance on request basis","text":"

Rsync performance can be evaluated by enabling a config option. After this, Geo-rep starts recording rsync performance in the log file, which can be post-processed to obtain meaningful metrics.
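
The option referred to here is understood to be log-rsync-performance (treat the exact name as an assumption); enabling it for the placeholder session used above would look like:

gluster volume geo-replication mastervol slavehost.example.com::slavevol config log-rsync-performance true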

                                                        Fixes: 764827

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#initial-sync-issues-due-to-upper-limit-comparison-during-filesystem-crawl","title":"Initial sync issues due to upper limit comparison during Filesystem Crawl","text":"

Bug fix: wrong logic in XSync change detection was fixed; the upper limit was being considered during the XSync crawl. Geo-rep XSync was missing many files on the assumption that Changelog would take care of them, but Changelog does not have complete details of files created before enabling Geo-replication.

When rsync/tarssh fails, geo-rep is now capable of identifying safe errors and continuing to sync by ignoring those issues. For example, rsync may fail to sync a file which was deleted on the master during the sync; this can be ignored, since the file is unlinked and there is no need to try syncing it.

                                                        Fixes: 1200733

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#changelog-failures-and-brick-failures-handling","title":"Changelog failures and Brick failures handling","text":"

When a brick process went down, or on any Changelog exception, the Geo-rep worker used to fall back to the XSync crawl, which was bad since XSync fails to identify deletes and renames. Now this is prevented: the worker goes to Faulty and waits for that brick process to come back.

                                                        Fixes: 1202649

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#archive-changelogs-in-working-directory-after-processing","title":"Archive Changelogs in working directory after processing","text":"

Changelogs are now archived in the working directory after processing, and empty changelogs are not generated when no data is available. This is a great improvement in terms of reducing inode consumption on the brick.

                                                        Fixes: 1169331

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#virtual-xattr-to-trigger-sync","title":"Virtual xattr to trigger sync","text":"

Since Historical Changelogs are used when a Geo-rep worker restarts, only SETATTR is recorded when a file is touched. In previous versions, re-triggering a file sync meant stopping geo-rep, touching the files and starting geo-replication again. Now touch will not help, since it records only SETATTR. A virtual xattr is introduced to re-trigger the sync, with no Geo-rep restart required.
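
To the best of our knowledge the virtual xattr in question is glusterfs.geo-rep.trigger-sync, set on a path under the master volume's mount (the mount point and file path below are placeholders):

# Queue a single file for re-sync without restarting the geo-rep session
setfattr -n glusterfs.geo-rep.trigger-sync -v "1" /mnt/mastervol/path/to/file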

                                                        Fixes: 1176934

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#ssh-keys-overwrite-issues-during-geo-rep-create","title":"SSH Keys overwrite issues during Geo-rep create","text":"

Parallel creates or multiple Geo-rep session creations were overwriting the pem keys written by the first one. This led to connectivity issues when Geo-rep was started.
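
For background, the pem keys in question are the ones generated and distributed during session setup, roughly as follows (placeholder names; the full sequence is in the geo-replication setup guide):

# Generate the common pem keys on the master cluster
gluster system:: execute gsec_create

# Create the session and push the pem keys to the slave nodes
gluster volume geo-replication mastervol slavehost.example.com::slavevol create push-pem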

                                                        Fixes: 1183229

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#ownership-sync-improvements","title":"Ownership sync improvements","text":"

                                                        Geo-rep was failing to sync ownership information from master cluster to Slave cluster.

                                                        Fixes: 1104954

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#slave-node-failover-handling-improvements","title":"Slave node failover handling improvements","text":"

When a Slave node goes down, the Master worker connected to that brick goes to Faulty. Now it tries to connect to another Slave node instead of waiting for that Slave node to come back.

                                                        Fixes: 1151412

                                                        "},{"location":"release-notes/geo-rep-in-3.7/#support-of-ssh-keys-custom-location","title":"Support of ssh keys custom location","text":"

If ssh authorized_keys are configured in a non-standard location instead of the default $HOME/.ssh/authorized_keys, Geo-rep create used to fail; this is now supported.
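
For context, a non-standard location usually comes from an AuthorizedKeysFile override in the slave's sshd configuration; the effective location can be checked with generic OpenSSH tooling (not Gluster-specific):

# Show the effective authorized_keys location that sshd is using on the slave node
sshd -T | grep -i authorizedkeysfile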

                                                        Fixes: 1181117

                                                        "},{"location":"release-notes/glusterfs-selinux2.0.1/","title":"Release notes for glusterfs-selinux 2.0.1","text":"

                                                        This is a bugfix and improvement release.

                                                        "},{"location":"release-notes/glusterfs-selinux2.0.1/#important-fixes-in-this-release","title":"Important fixes in this release","text":"
                                                        • #rhbz1955415 glusterfs-selinux package should own the files created by it
                                                        • #20 Fixing verification failure for ghost
                                                        • #rhbz1779052 Adds rule to allow glusterd to access RDMA socket
                                                        "},{"location":"release-notes/glusterfs-selinux2.0.1/#issues-addressed-in-this-release","title":"Issues addressed in this release","text":"
                                                        • #rhbz1955415 glusterfs-selinux package should own the files created by it
                                                        • #22 Fixed mixed use of tabs and spaces (rpmlint warning)
                                                        • #20 Fixing verification failure for ghost file
                                                        • #rhbz1779052 Adds rule to allow glusterd to access RDMA socket
• #15 Modifying the path provided for glustereventsd.py
"}]}

diff --git a/security/index.html b/security/index.html
new file mode 100644
index 00000000..87031ee0
--- /dev/null
+++ b/security/index.html
@@ -0,0 +1,4426 @@
Security - Gluster Docs

This document is to be considered a "work in progress" until this message is removed.

Reporting security issues

Please report any security issues you find in Gluster projects to: security at gluster.org

Anyone can post to this list. The subscribers are only trusted individuals who will handle the resolution of any reported security issues in confidence. In your report, please note how you would like to be credited for discovering the issue and the details of any embargo you would like to impose.

[need to check if this holds]

Currently, the security response teams for the following distributions are subscribed to this list and will respond to your report:

• Fedora
• Red Hat

Handling security issues

If you represent a Gluster project or a distribution which packages Gluster projects, you are welcome to subscribe to the security at gluster.org mailing list. Your subscription will only be approved if you can demonstrate that you will handle issues in confidence and properly credit reporters for discovering issues. A second mailing list exists for discussion of embargoed security issues:

security-private at gluster.org

You will be invited to subscribe to this list if you are subscribed to security at gluster.org.

Security advisories

The security advisories page lists all security vulnerabilities fixed in Gluster.

[need to check if this holds]

\ No newline at end of file
diff --git a/sitemap.xml b/sitemap.xml
new file mode 100644
index 00000000..5664bad2
--- /dev/null
+++ b/sitemap.xml
@@ -0,0 +1,1168 @@
[sitemap.xml content: one entry for every page under http://docs.gluster.org/ (site root, glossary, security, Administrator-Guide, CLI-Reference, Contributors-Guide, Developer-guide, GlusterFS-Tools, Install-Guide, Ops-Guide, Quick-Start-Guide, Troubleshooting, Upgrade-Guide, presentations, and release-notes pages), each with lastmod 2023-11-21 and changefreq daily]
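For reference, this is how one flattened sitemap entry reads once its markup is restored. The <urlset>/<url>/<loc>/<lastmod>/<changefreq> tag names and the sitemaps.org 0.9 namespace are assumptions based on the standard sitemap schema, since the tags themselves were stripped in the extracted text; only the URL, date, and change-frequency values come from the entries above.

    <?xml version="1.0" encoding="UTF-8"?>
    <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
        <!-- one <url> block per documentation page; values taken from the flattened entry for the site root -->
        <url>
            <loc>http://docs.gluster.org/</loc>
            <lastmod>2023-11-21</lastmod>
            <changefreq>daily</changefreq>
        </url>
    </urlset>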
diff --git a/sitemap.xml.gz b/sitemap.xml.gz
new file mode 100644
index 0000000000000000000000000000000000000000..466ef4975e63a80debef6bc160ff508f1bae9433
GIT binary patch
literal 1830
[compressed binary data omitted]

literal 0
HcmV?d00001

diff --git a/images/Distributed-Striped-Volume.png b/images/Distributed-Striped-Volume.png
new file mode 100644
index 0000000000000000000000000000000000000000..752fa982fa6c7ebe1fc6c6617a61af37f8139830
GIT binary patch
literal 53781
[PNG binary data omitted]
zK5(b*@CV)yBv?-3uFf>>ZXt&cp-v|@el&(DNh@87t`P}-mV>914-1>TS>I91wpw_w z;aK_&O)>sc#OKbCTxJ-Z)q;x8J>+aEpk*OihSh)HYlibw`5PmdooNO}f>un464k@A zvsQ~l*Ba<4w)(Yumk~Q^b?1Ca)K4>w9iy*)K-@8cPSxor7+mldZU3MZ4_c6kSlE|X zGyTAwE#BaU&zrsZ_iiOX@PDRsId`5CIcPuTpfX@7CqXa>8GSaS8GJkhi?flt(+0_0 z+X+0md!Ph+x4xxy`{}X9Er^};@c=o`2MwN*?2KW1d%MPV!%aK%N>Bl>_#7uzL`!d= zyvU4+f4Ntr!*xKhnj80{1`qQ{6VO&bjVBX|zQ2_TOo%sk#p`s$DO7Z3Q3FxIRIzZ& z$mZ%28i~icdfY5zk)KIRsXNGqalKdodQ_DSD)c5IHP(FS713v*VFAyW6vu;6!Jo!$ z_8*Ft=zsn5f_#V%b+%=*38>MLVcnR^%8iiE)VkAQwl*wFSr?xrGo2UO!e-&B_Q%(k zi7V!&pgaL*6~A`!mwd8}f3REj|$ ziK`?=>rCC+b+M*+hbk9gG(*!L*Ih63dOEeU-y%TE=IK-zIrnDh%xidb+z&*S)jaJTzIv-Bk?DnqkxKwl@x7IUMaS-`4B z=0R}ZzFzn2Q!IGr5mEW$I~we2iaN4uHt96lti`yZV>Saot#!i445)x|3N$cQkaulq zC)3y*(_W(;>?v#oomilREc&D(GIj5_s#`9lExvaaX3{t&_^XZc0{xAo7$J~_iccj^ zlc(`XvZhbhb#v0>=i2F7Tld~Wjemr-0KZt!YJs{m>($Ug#nB9={5FiRDK4#VVJOtL#LPw#3AZ5nS>D8f@nvIxPV}rGkBvY6n**7 zcSZ6{{SB+OBSpay{#P20&NB@aA>VGgPro+o`aD)pJRv?T$k{pCkaXqfW(i;yERTOXywBfTGY z8ei>=rg{x9r^OkeH_bUta>HF?pV%Sue|Z+V`Xn3YyFD(@5U?DY6$5Rc+V6ftBMgUT zJrW*@X5wOfyuL^n?0aiZE z)jvP=KVN|n0CL>}Sic@{%sa8xDWOZ7P-Z6=%bWTEcvDc3VPhRd-6u`$5<(t9)&rJR zEiKjSJ1rv(4{E2-PxXdoi(o2Ia`K4rPP@Z~2*51t3u$HWuMMCKCjmbk8;=eB7MXmcu{|ANq&Slk$ zaB%_O?;*l_?6J+jV;B1zX_oI1G@Ikz1nFRWR8$dZ<|JAr+1LVVs-A%>j-r!zNS>Gy z);aEM2E!fE!dkI3qV$0~G$jaM&g?1@)&Drtx{&7fMlzj>sxTTVU4DIFuKsvxad+ST z{_>--r?<7(b5v8SEr7xEmH989tD#Eo7%KOOkS>n&2)*W{O%P2K$VD2zyc$PRk`e=J z8Ov-L=6Uq6xqbc{TH5B2q7DDu8$`=BVedwsz5bcSH=u|_dMN`L**f5JAe|8@T;p6! zJA+wZujogIy`nqziq?cFDgEWsS_84-sUUN6o!ABTMAfn|GWHOS+%O)3`2%;Owl6;x z;;PnEVf!n0FCl;jhU^M|E)PJY)uows7tJ4fRhScqE5^j%uiu$9d3l%`nj||Mx&6FI z^{VBU_a2b`yWAlPC1l=vb_kL2tj|9GMu#j9Ky>Ia)j6Hi5&l6=usv*`p=FAefu$h1 zQG*O-VrUbpR0tWHqeKj?2?C78wQHB-aLA#v?X;qwW#q8EJmnIpGLQ^b#=u{ zh4~0&Tsi&cg4Wmz+FtMg0T9!!w@^|ymeyA5^n0;;3AcOyLfOOh7aS|uKEaaP2*N-| zqnM0OAh79j6b3-PG5}&913>x>K!h$!Q@lu21jX;$kAIR~qo+4jQHeUUHZxPllIQ_U ze**Ld+5iP}+cXDX0avF^woN*@(-@R^5Gk_4s#g!oC#+kxWbmG|!p9B99k4!Y*+Q%2mI|K0g++KGwf}MjB!RC#1pf5!unKwJIG*2OPHQfyL6Ke*eo=)nM}R&#J&;YHlY2Y-vXzmW zl2u+C+esQV| zKIlGGt1i7=lJd$&sm|^!{Po@T!=cE+OoHW!OBHt$U6K4Bi@zR%_CK#IxpR1-!=AI1 zaV-#CO7^e^hzHQo9bQ99Z?^pD%O*F+BW0vENCm|Y-lpD(p~*>TyruhyRYa_%J6744C0(Q#_ z3t0yH`>A5M!`-|A)}AaAWco^7hG0-!fU7;11WF?Liea~hU&xtX`sEgpa0f3VOYZ6L zst%w&p~63b=Rz*im*9Ub&+KYDzL=-cM1%`_tEsx_*3ErK8-rIK?R#Di_TD1`-s67V?wC4!Z?!x3LG6~;997I3&~;YLh1k)@ ztO2gV20;8f?v2zt_5B8TZYhTS^>`GoAxpl44YP)Q zo(|+PZwbDRWPWX!&Ts%@H)n|uPmVj)AdRWzZ*G;8FN)b_Ry^4cZ8)$_P`^&}Z_x5k zy55LPRe(t65qB;!QpBcpX%S?f4*_t;k&TLQ0H|IgPjM?ME5Y9m=|EVWyxO8Z8l+iO z0gHnU-th3RLLHE{Ez`;39c%Sn?O~SIBa6V@ zxRv9?mOf{eiv9j!j;JXd6^v12C(FcgNMs-XOjPj$iPb+%*5sZFJ!6teiYI5*1Z`_d z(I{l{`i*UkZhLZk$~;Y%{&WuiBEe08jjzk@hj;)v z1osH*iNp7ms2k5~tR1qcT+xU|McOVoPVK{pX4aN;pw$r_WmA(^6|2qft0$h8gDY4-+=$BsYYQI6h%pBn`4cMUh$4A!TK$`}8B8{oS`dJm42V-K;|Cvp~<^b5p}odYa)aMA0jVgQs} zJtR=U_W+f+_+MM1$*uu*i>s>muqV!LW}3*k2|JlCnr5wzN3@mM}A(e$6;HBwhjaPzC7bBFKfel!)aviD- zT^C#l1APA^_jLc@l@%m49GLt8RODVl@abnaxPP!BP)WB}y>7PF$$mKl)1|Ng)r`Pl z=^Q8TXF1sU;l?gmDncEh#pB(1YqESseKDY9M+3{KD~#=;iMfR!eU7?-;qkzVMJ-k$ zE=zOqco5tKl-c}B7h`F^u{|TEdRh5p0WaSiUJ4Vo9%q1`X;3@tyJH7~`Ojd0U}Q`N zJJ7m+ZcUL5HVe#Kbjcp4yV&8$9og`me6Zr!<1zJ*VsPo;nPS>25U_gMmU6-3*(YTg zi2jVaps*c0ANCICDkaJw)K$mb-(n7l>^!OVJ}1)MChtD{s|a)X@M4t>!+2NKp_2_7 zqy!j-Z2KtVy>P6XOUIYpwZXejN*9yK!te5t^wId2ND}+~9?i#Od5l8!^(^pKFe!Lv zfQCwiM~@&|Hvc#$7i;$m{NLA@Q~M2~q9{8w_+q4kzK~|-H?f-}P@dlxyg{%?Fv5|O zAY>8W9RPHvNuG46M@Ij?f>7M36+9*XfL#~+=4v}5O+4jUR?`h7^8LiA^+%cL*%vAARf z69*nQhU6Jp4wA3KKOK--P-HX8I3%AgF#)!PdQ4(uxUAL*)_P|0$7Ujkm_lURRz5^;WL)REniV^mb#*TMn4fzJMpwkwq z5yf$m 
z<+CLEpL5j^S)o+Pt*+he9#ok;shY8E07@$vgWyf4z6Hf1I*o=~qd)KIBjpOezqfrk zc4Nm|&T8Rc0*&xn5ohMk7^NSg2k)FL+yd6Mz;+p}=}x+R+J!R>8t=JJydszKoXO4y z$yCsK56iK-1Bf!cZp!8D#J7wcjeBcDxvzYIgkCoEy?QJ(#J#FKfHlGY*(1W(%7$=^ zo^daU;Ull|LI%xXF<~HILk%SAULgWZxT`do2X9^Vh#<<+bwaw9Gm)3mT^mK;b6DwS zhZ=mlP@&P|v!%>F6S~ZVkmB(yQa1nVbb<5g7J%fYYaTU!ICgv;MiwhRoNo3}lV8{^ zf`}Hx7;p&zs~ztEVwYFACC8>nIXwYYsNKAFYVtZwA$*ib5WGEpHs2^7i84bi<`#uDsSitlQfX6VPNTh+ahB3J+pny z+{vYw24L23^)5xzayeD>u=>%E_RQQeNd*+vGS9O&j)HCAmXDhV@4aq@R}1O%m9Be! zTL8)^b=4L?X+aG+Yar~6M1UYg=2~JDF8pIXrPj~8M_@=KTIJt9*>V-KtKX>0xDj)W za8ORAlHou$P5v;kEQXu=uXJ;RSAELov!F~)&E^|@ipOGATpwp|yM~?KtzlSc4L&qSz zr)*l4iD-#alMn;0QFBY#`VFIOJU%W={dzA=wd?mAWPeymWU&kaA$Qls_sYUE#+1*`n~L?`!qL`%Naf{Px~ItQvJ zvc8mi3uYi{Jb3A5;nj0kTY$$yC_t7eLk-=EQC^zL*ZGQrW5TJUu3~m%LK);ottUPG zx!mqc*MK(J1MvVWKo0Ln=GELouMSm6CJ9IUC1EuzHvwvZolKoEUGz8I zwQEqj4sD$%yVu46k%M@LrLwZds-|RnpKaM~XyL75+KwbdA%4dL!OteyiiHkd?1lCz zi8_elE1oUk)wjpsQWC`MM4*2~BvstuUew3{Gvchyu&ZBYHf;Bk7 zO77p^3Gk|{9X&>ZQ#W{ulDkB5lj!vXDWskKz0z+aA=2d%Me~GUpeN+-A^#mNex=cs zwHWkg)Ets!R2P?!=*1z7BYG`oZyckFN67T)FXbO4YDB;+O-U2VUaEe1B1XJw-to06 zenj&|U2M$y^Cg3y7f}EJiK@aNLAgkYVJEVSO!>Y^y671*C<`#2iL!4I=?Yl_4`(W? zH_;e96@kk2pk-L3JVHh(Smyn%D`!jtn_%O_Ags?rUawt!wsGM$jnSt3#bQog#b0slz?l6hkd8H~VUB z$)^3%)#rB^wV@6ILk%91}=BUp%v7yPLNfj#TWE4WduL$-I_9#$4uy? zU*!%AXcT_5r+VVy_G~ZV0QKqg6J~mXfD`xA!tlS(Cf>@%?nuOmXBwZ#G3 zGlC>s35zUxJihQ}T#$Tx*43bP3b{h<9_8dH!XW`5fXw2+{sl+i#5E%oPfyo0iyq#( zm-l1k1#L7xI2DJy^ zNS*eh5$N+@sI-HygRS}MVN9q}${tW~R*3Cj&C{N52g!Lp|{tkldj!Px%h1dx!h!TJ4~}8+gOvMgR#KOsr=4@z}SnckR-!5W1YvGdQyZl ze4rK^UM#7*kUSC5H4t*JMW-ji_jg{E>m`2cgO;rA&S0|~wpa=`LrpA1Q*@Qk6-+*)^HGMl^1=&i3OvX{Y})bo=%bc>Z4{{P)vsn?RJrplZoTTH;*wMLFcOf(aOy$;BnPdU~*pQ}It8ghE$E|zahTGGS^${Dz>mEqo@0H`(5{(&x9yrS!Ck^E7ogLc!JF;3!S=}-UH z%klyh7QO}pJmp{-&<)gEq=WGw-9Xax*_a;WBiA?A)lvO-rq48>Lf`nyDh7BuT$}MJ z9sQOjDPP{>SbXR|?99ZzLy-;Q;E!NE&ryP;OMnt6z5Fs%&$RtY#+fDiMjO&#R@Q=W z*OB7w24T(M^i~XuN56SIeup8h?2H+aY0y$z8Tlx^7umm;Sy+5{?E-|O&ASN`e=ru3 zu6^UlWN{^%J#ly9UxXlDEne;{9)H#v9HSKM)LjfI!42D~_B!s($Kzb-x;0}8(p@*0 z$x2+3J*>_jy*>FXRUL`|>H!+!I{*QhIQPi)Vd$;rL=aY521U{$C^(Ic874`dNW5(c zS5;MAukDzhuSa}t_A#jB<&ry5V^asx5$cQTY#Me?4#Qt>=PCq0sn9~7%Uuk?BMj3N zF;oh7k2r#>&W$#{A(Rpm8JHB;TJ2Wy?CDlXnC)@)~xjUfR31B}EDcx5M6Ua9@lZX#MUI(0G3^Q6JJHpiz0XCeB0i z4nyTu0?JTZF=YU6klIzEg;xaY&y93HoH6L*AT`and^-KKZ-#QJTk6Hl`bo+#y0B|u z^kGv_WJ#%=B4#|xY&XvCPBpGlh2m_49-cVc`)7*Gm=)}3$0mC>E_mM9J+(VD!d}5| z22xhdcENvf^FrvNCMg`~cx9LwyR8fk3|AzeFN`jHr+AK!epEVk+i6wGD%LIM*MA?{ zxTtvb>govvw`i(MvElyN^S}NtE)>pJo9)Aq)moNuirt=7O%c4D2W(?`{ z$1$u$%NEFGD??h^%FP7Dluni*l<@!r^J@`asaO!9!4;syA`xGH8{FQ2Z}TU(eJ=!A zgRcDd_TMP~zIjGutf@xzPEWJBftqU{0Bekl5;tJUwH4zmRro=M??!x$age_QxR{?+i~DKGAvZ+Zq>*{KZMcA z{C#Jm6F}zj&v_p7@2oe3{~o$&U{ik_Gx#lBB`Z~aoA}Gxp9lT>Zq5@*H$0TEYt;YG zkDmWO!rn3F$z7kQf?ih5-d6q@)`phK7+=U_e5;o1p|m zau{lc7zXY^-+%l*+X;hDQre>}5q-dWPWz^LaRhsJv=hI2bTC!+d^Os)=^^9H|C+`(AfkjL zhcb2Hseo_E?x-0%ti=2mW#?G z;h4+KY`r*qwAHvl&5!Nuc>d-9-bxvGNB`6;QaT^=bC5ip%{Q@taKhWt7Mqd4* z_0T7Uel$6n!Oa=tptz?{9wKem6X>04pauvcS1MEa|j z!q?F{JZFEWYVy{E`ZNiI6ha0ehfo08NpI~|pE@Sk4AMAudA@|&V5AIG*MOgr>N#OZ zCy~cG_F@P4WLmJ+!w8t08g~xT0~i03*TLJj=So2XC0J$uLp|%w^4?7Ts#e!Ley{iU z>6xyp4Y4J~XIphqELK4P{4f z{I1?$`^f}bo3KdY4AqgL5{6}`FP%`@N4loku4bT(oG+K?9w>z#k@ zX)I56de(xE!(#&08MJbm`;XS1D{QuCoreB!`1--AIdTi#kD3}B#cE9Cn6|h%=R-ZYZF_ZSH8;F)mOBhyOHm(Q2k0HF<2C>2myj>^ zN;WE8n)tMPFq7ap?q~Z@PHu+<33_b>B-UZv^8>tg!%72u*T$vO>o$GIBg7tj{Rv*t z(}m?M1dzJ4>cVmJ#W^Ye3$Vqz}z@U3T@dQ2NZ9lc<(M2^g3|A&K_XfV`!d6=K10BHu3k5R3_(!qtASiDZrB| z-G;G0wZNhRHN1(&OlybFpY3$wDhRtbxUHg79_^boL>l5QmcHd5funWLee6Gg7FM++ 
zYq=0pLvR9b$(6}~V~3NqDCx7)BkBEC`(uL@$y)3=GNSv*G;jKqtt{L@(s`{gR2_Nq zza8ZzGVcH{Jheg7HJ%voTbs(!HXWb}J!2Jcy2mPqSS&^Q*Rv)5fXT~GmwHXD`b2(ue$&!N@$tr8%kpk8_3s|0&Shxqm1 zJwy*UZp#)8nJud2r(C|=MMu^%cJ}iQB3za+4IB9ykvOk)*v>ct#oIS;9`T&}LV4OH zfYmX2Cu*xG$h@&=4bpsc>nk+jM{gmHezau=&<t1V8$Qn)p$~SNi7dPkhG)mTtXpIbREplM8CVf)Dc65J&T6_Hb12 zC@=?fjidUv)_-1R?pjM^y}ireR%V`eV*L%%q0M)p zZhTqrRa~=Hu>=dC5zoWrs4I7<>w!7(Dm{g=h6k~xb1r{S*zA2P49+^r^OP>h)6I{o z9!isj70{2L$EkV(ivOM>)O{?**7NL_R{EoZ9(%Fv6~Y`4WPx*GD{7pF5C;xW-#K)*QX> z_>mmXEsA*C4fGr|^mb9f2{GfR4&JgyZigif?Nd>jc_iI_lDdceaXNt@b>ZamQ2n<7 zObFJ8xt$DR4WC>BbN1IO2kaG_VuuULa&n*@9LbU5NzRkJS~xxvtkUm?3@nZYHiDXe zE@DhDOGmTlNanNG{t{|4=>Q$to5^#&_pI5^fI2Jo1PScJFhc7OMj;2a4!53DNtYN* z?`vb{dL&*%0h!n89#QF+m;L53y>D6tpYcAK^%=jB+bNLA&{;77bH{`wLM}ug?+%~x zhu@*Txy@>b-H{d0tQhpGJ{|?v0Ur|3Z<$69KKOX$j zY&9<>A3X^A?M6ha*4luR9<%$WSJukF%Yut$*80ryL$^mHxt=~8r> z<_fLzLW3fj0xLc(Ryw)&e?}WzID5R%;vTFHNoC0!EuCMYq06DX8Z9QvOe1G$ufp+H zV1B`R)zmOi>%G_QT`h_U@8igF`Kw9B4(bCqizdv-RVZs)$2~f=DUm4h*VVBpn1(1D z`RZ6{?3@OB1t?Gy$%G8V)TCx&>QWG$s+?{_4S0ezXE-zJ=yUg8h96Evu%GaN(vEW6 zg<$JzS+6QdvZB@2QZj*>3C;bM*5bJ{ik#C&8osN6`669Lk|&?-171SU8Y}_U!cgPm z!SVn_YSzn~`rP>9)!w^lZm^uKXAZ5vTad+>?r3ywp5#U{@~II~qOg&Tl-YB;VPb31 zm(g1f9B}h{{bP1k^|yIY^WhkFQ1JZZfxZg22|W^3T}-)K8$1DEoLIg?3@Y>(>|1Cgs zpufQ1)%08%*z(>UTDOc{SS#z^*j<>D{^X*BX!`falD;Ey=9LTI3I=eKEMxKBf9216%J)L(9WQAFI*W{Z@Li%_%1wx`lZ)4%6fAH1ph{nlxyu^1uIpzP z28cc#Kfy8TOz)@^DswweYj~#Cndc*^J0X)dIk0-)Tm71wHZzNta8KvWPl&Gs-vlU| z<1Z{bt=>rVjMeG-@W|44!u6#tn74A-Xo;_6X=TEj6T4@JaQCNzq8J@ZYghbmx?xIK zC&aAMyn(oDwRsY@tQ(Q}Coe!AEM`@aV;rz~_}f~n`srF`_eC(?V3kBk2Nfz}8lnS8o(vjum>1yuUo8=`mpeJUCzbIcG+@#T z)Y&M~NsHso*Ew9l121W1c!opO4`w+w0y2{!N&}SvYfbWDnYZIC``Qz2kYXDi6 zNIyNAw(~#4mtlV=7B+3MI{Ckz(7h>O|Dd0hDN$cTtOiawBYGQ_4H0AKb+qXsd|6=n z>M6`F;R(LRh8am?guditt{?WLmgIl^Hp^Y|mp)kQy^virM7j3gZ6nTn*HSRmBaXbi zzbu_$EQxKtoGg%Vcym(GTVRS1 z!T|uH8!!I}b1EzC&@@8KS(Y<;N1Q3K86GfjiFQIklqms8* z#g_oji=S(lCW8=5_;T{s3JPT_un_R?Y|hhXH6%3!4z(es8$l4Q-;hL1N`a0VfnB2t zX8BmF50?-j&v|*EJ@|KO{97k$uYQpY-a;1nT^`=yZN7;8`w9Q%h3mEEN{u`zG)4!0 z`BCOai{ef`KcU56_N6hIm$xy*vzcBC5|hkn{57%u8=cDh<4?9W>LI^nctFWL*`Nun znWAsDym@JHI~0+ccsbQ*Uv0H)p1?rB?+^{LU*hq3J())6-k#*}nA|QBxmx*c zYGQr9sayE)_E~pJ_t2uf|G#hli+NNzd8E-|?@QOUbg%aB7H*ikI2&IRE%xbZyI#$} z&A*j@|9icH2-#RlG@*Sp*`kyH$T>h@LYNBfXUf%85p61;1xivEYTf9E=_9W`imDDN zj9!W@9;6p(of|ut${MQ!s$JtT-kq6Aepny7K;s5#!e$(rg7S&*H^^(ql>37e9tqlZTw8z*Yu?**26+1}s1E2Qxe^-7gB1=zD)6 zE+I=R#v+!yD2f2#n`?xa)VAi2-I->S=G_B)8rvHC&9}q!{f~7et^ZRp!0a~%Z526Q z=|xRA4v64r^m%irv@(p}30d)=G}pwkwADXdFX4aW?*?FLzs<4NIqiag6s$Jol4{#u zFwb-=aG(F)=Px}~7Z}d!zoVNo@_&Xh(1b`E{8M^>?vwXv_+yL^%cPVguOV7@a0h^? 
zvCl&I04xGkjN8aXOoXO)bF!8k+h$XTeQ?vGZ7-jZu7~q=gBZ&y03v7j<*q~EoH+k#3#K7kw-JV;@0A$T z_88+=-mKOamT)=WXaU-Lu1L#(hPe0NJm#P^u&r>Ybqr2y?s0Y4=Q;PEaQ;A(;;{TW8Tc`9wWj(wG+op(; z%P9N5bfvl2RT}l{))T2BM>w$MCpG>G+w?H^t^c`eTYUhj4}LRgc>n)>h4*F2$(8GB zzs^dC%}?F&9$U1NHYm9mB_%%IMwRQAXJERH4YJM@yaOjoT8%Ne6pcpc1{q;pJB`Vm z(+l<(2vct2cMcvkT77r@Eu_`xg%^N|#fC&})U~UO-7_4!(~ytLA~!Y4BY4RzgPa8Z zcFEZc{(xhtA-?s)#le!%AF`3d>`nbE6!&!^i~osJ3JKzp48~iT#pYW0j3o$?zN9}x zE%%i-Dg{#io&qDI^G-P~eZ5BE)4d;K=o(KU?7j*&$lF#r?Yj}P=gm$qzcX8eFyK{{*nPV5()sRc`wuSs1yP}d^t z@KHsZMvGgFg|(r;a48~1|C=XvfHYH3ZNS+vkUa{{6h=;_n77m-Oge>RQ@UpbV{zH9 z5z(^^^blTBgNiHkQ$W$uj$4F&Sk;ju8A^P7QkHy3#zFXl&+6^wgXG)*Rd0scn zUq`}}DDq&gDD1P+bp>S2J&S1x6*90AdB?0R9a9u8V&wY7gxNMn7iKnlmX6Cfv&I?~ zRqyg{&N!)5g$g{0kOW#P&g!R&gw7vT9Si3Sx}Hn_kTU%f*!g@0S;#S!itM)y{~4J_sSsQnH+(mtpMni!M(bcS3Iovhj} zb6_;a=0}g{=cnKzsGLjK5u{F=`j><;g7RCJ_{4)`Xqg<*0FOFIpqrh)Yv1D73Dq01 zdm{&4mzpuzJ+qT{yrr;n$&gYxK9CJ6M&G9vv4p8>udoX&E2DItGy4osl1e&7f_Seizfbtln(6mB=6BHp#P+et8D7{P zP*LH6*%^Lp&C3yae^k(p1(E^rZh!MwESpT0lgC8z?Mu!~NGKaN-w4MrHi^nujUDMM zH;sdtzeS$;fw_&~Lo>#c#Ki0`{$LD6!$!XTY~9bPrEgtf)b;nsx0;Uqc~L9K5My!{ z2Hbl!`7-NMOyzNbM1)0by%gQ%+&!o8dJmYy6!tJ8s3I@6w=oxG{fEe7g3+|DLHOcU zOio<44E`A2PZlCIONT3+`~stH_p#ev@+FHXD@3Knv&p19_G8oy5$vYn_v@Zu3l9mk zfgZ1&GyB=1J1g{&;s`va0+$+4nqiEoq&~LL!8`P*p*QWjAk0OfY`PL&WAtMbVT*k5 zFuuiF&7Q@2D+liW`qcs+&k3RR?t=nHu%FYv2Yq5Y?P|Rh+d?_NG=dL5?zY>>qJzah zBVh|(ero?@37Io*Zrm7laEQE6d`FEc-GnVL*ub-s<$dcCzEl_qxA4C|>`cn1&FfwS zKjxiLp%>i!0`*PPb)677{lK#re7r(i7DP<3TyvBY-8kb5@A|ZG9hT3N*&I*~%ZF{4 zE0~Y9#VA67@@>TD#ZO+yv{xK4PSx|-Kr(&L%;e@YFyQc5px~C}x+g?;w&55Z_z?a- z?&hU=PO#U4D!ay>Sf88h@n|kgK@3W^M99pePAZK7^R)YK+M*vokO3GEH>gMxceG~@op;<)uyPwpB0xi7S33%wRcU?d(Uw;Z1W`vumL2Bwd8^-5X2T-$A(RurShU zYw%5V-_@oN4iDc)ON>q{GiaT|pvBfP0^4x%6c}!)JXVz$eDmUaj=i*F0WOxf*RN>3~+chj<8s7#6Bi}olkLEQ@J_xS3 zfPYI~e46T-`}8bP*d|27jB!ce1e>omP}%yr>B2B_S|L#JZexI~G|lqahRP^5(lpRR z-)M>kyT^sNF^)IOoan3~Kuan-4W-Q*aHr8Ik>|#22FUVGZUoilOSo9S2H)h8`&mGr zf*Qc^&IW+b-$*XUV;+U@2|T+)ovdkpjqo|MhxQ%5)IFkcQ%04(SbVaUHy~&TIe|Ci z-dC5WLah>*YXJ`apu~MJ1<82J`OXh1&M4j0mh*@E45t?^(URm$5E|7U&6I2HDfRt{ zd&_5SFv+T4*C^?ST{q539>G>|e&qCiINWuR@ZN=s!;+yh&Fkb88dqFu0egoc^!5zU zCw12WmdL2> z%`P(?rn?z_EJp^GslX4Hj9QmST_O3H#ObgKi8ZoH=+Y`kxktIuMfT+Am^*cEa10^c z%8EKd^H3-+phs;-yhp-DQXU>sYe20R;<|vG>mXq=wok-QmdW2uZ>q`?mR+qLuKtb5 z;||yU{f{*ocp!z~&dsOFQlSPbN-rkv#Oi&=2cP3P_Cu#rcA+J+qS8A;yS}_(0h!eePb;VvC8QE1`%-baGIzNIVAMQ| z2ZC!m3^bOlE=wM6#kYQ3woP|~_ZBDS%|fxa^{_3EmB`MJrsyTRodp_L9&6$oMe7H1 z0FQ3SIi!zFxrP9GCxq|w>-5>0+Wct@rncQ`iptxBTTBzLdi>AA5p=W#{^438h%_R2 zK6mrIUZP^iu<>Tk38IN(p^){1qBi!0QJ~BOkv;DW9l@_lwKS*Afruw-+o+GV79N=W zJ#&GsvYF&&WbyXKPPvi*BF^hbx^%St-*NC0?CmR}gq2Ig=l3%YRZ~DDGV-{=BPjS%}2PvSTEVGM*;K_o$6{}Y7 zCH|^TdR2Dd=vGB`Q?lNBT;kv)`5F*XS+I(*7ub%^XFd^$&tV1*cQdEame2b5_zv;3 z7b{0~Jv`voF1p>#rX%B0|Mt-Sr7{aA1Wq>pGN(Oz@caW1aQ!9D!((B8ma9u_4|-Dr z@#)mz78^wdYxsaobE@Cuyf*sb$QX$ICG*m3It;1DpwT`j`ic{o#TS#I|7{zwcdxq+{i>t#HNPQGwET%yz4B&&NPqSB`E?Q#II_r#JC6!?QX!yD&B( z+V&suxsOAdTH-tkz(5Grle1ex!~Vjequ+>U@s8!c-oi6SQM2-O&9BSs0i6t@v8K#zUy z{kL&o?a#Y2bV>@16>`}Fh_zmu9Qw9JMSEU}tFfOWYMvmP#3WwR!rEiQh*7ywUvL%o zb5)EFr7jgyi517ZPHVrb zmz{5;wlx->D{ePg&1KZ|U}0#VTQhai{80peII*EVEGUVn@RdU!r-5^?dbYs;K%P5jcl7fppUiy z0&+S^7wx~INhGiYxcgnj;`x(L8=PIf0(Y|R%fwqPXX#+{_1W!+ zM_TXe`J6S9YFlR)4Z$w8@w2@7hbrhZk~#dpjXfbh;>i?V?a0YSCAZStyP}y26}a*} zVdbOOG$?G=coB1#4lC%4=ga$YAlQBZ^<=;Pcp z6>p?cf0i_51@asTlCsfWy72qR=vmvd`X0DlSe+Bn*-Mq{!7~iq3K^RfOAi2x-Wrv=Q3^c9=b{&nZ_wV( zJ+dEhnW|?IK+Pm=3EC_3wwy30@tQ9lMsU)e%)MX;sU9HHY@+ZwAk~DOxua=eS``Qv z?a2nST;-(4zPsk5L4Sl`3YFzB$Y_s#76d-G9%~)^!+`?xEFEWx&t#6I z-JakY^@ggT;nw~B&ib)IW#+nsZGAJvJi` 
zPSTp{SglLL{$kUm+=)CLl4y(iF9v45Nssk)Uohrqc=lLa6NqCMwX)BlnOd|Xe^w*u z&@W-cb0FJ5GR018n?vah<^{+m^3psCo{?_8v;ysP3QjaztNFo*3I3EFP3UNFHC2Bx z3)K0!xO+mn;A3qqrb;SZ!VwK*+iyl#uaGLkO}VO6#iY}yUOlA<)x?`KVo%)6COYA1 zJSzMcCQ)!3U*`GcHCHVQn;|z^VQBzsW{_%(0_w@~XTf#XmtnT`bsd;<7Gh_7%*twxPsBJyVp}27==;Y^AL&JoW{)bqX)lFL)L(%IP6E}r-VO;C0tjYH3*s9xNGb3 zY&lKt?W}|d7)7vV95CiHLeTvl6!4IS3{a>xYRvJ(aByZ>GUfg0+cx%Pq0M z7ka+Sa@pS=DB+_BtOXF@x7Dx0)X|$s24;nLL`%?%V55;BHaHh}TDWgc$c!ZwXSWeP zHBo*FK04cmY3m?S7x_AN>mT;{ZQZE-&NlmYQlh?IIb^If&9r+N9vkv6ys|M??Nj3X zZ-JG3?*F82cSMSG03+_FgK`@ObyoB6zg*>1>*vY~h!h;3hpgKfEfUhg#HT$rbTxER zd?1$(dIPK2r;Mpq4(BCgSa+C}6KUN$`jJND7xe;r+@zy{VYP`ge38J#Aa2 z0IX@o>ihODpT0%$__KSCzc$Aj0DDG8^KZ5cZU(GkOhIQcI<)HAOA$52P4cW#oZT+7 zNR~)onhbP~$ySTiy4Bfk)o`F6ibjhJn5ZmJ7ZG}-Hl=OVZ}m`61AupIx$_X$ydUq| zcuI7p@%Rl{66qR9CVN(HArko#3st8HY#Ks!YitCho^4MChMgU`$m`g875wD(yE9-o zBlo)&u*a!@gW6#w>uwi;?3e`${1URC)+6l(edn;7DD~LNkLx3bpM45`S%GLMa+XmT z|1#ST+UcU_XKM=Ya+*~MUauG>_~T!(6=@c?_!bTHm zvPv9|Lnlg|)V5|(rbb`ODR|7MqY>VvnrvCI4heokhQ3q#!DvpG!O^lxIO1&Y>7I8)Q`HpkRt0 zTX69rd8D}RB|lf$`!45M8HyJ%qdS0bDBAxk1@Cb}H;vyE+ydslNLG zq1!~#gxT~gSc%hnmy!3YE6C+Tc~(A`RX1mP!#5712lLV_sVMtfhkAb7NTFg={D%V| zg|aDLZH2kjXnj+RIA!o%Zd> zE9O)~D!L?e4xM!@Iy>}TtvnPQpZe8e0#snS3Dg|HW{jqh1A zrM@VE@S5A_CIe*iDIYrQj=D_wpVz#nZ9x5*J>FycG)y3M(s@uiXDk1eM9svp^2u5h zJOQy$ir7|yri8rfZgIPT!xtzj)ibC5O{S$RhF79GOs_=an{U@fPCD>s;_H4BbC6*L zVtb;|i5~IQ^V!-Cjf`il1D6!%s%zlF?xlS+$>SW5i1@r(!WulobkiK|S zKo7WIWjwo+@0VV&Bqmhfxy@bx>U@4E9Bq;zhb!dGeHtuJFvdyp=ZID`I`j!bakZZQ ziyK!Ms90VQ9G%0(5UU|656HlNjOHiS7Mjp!j+X^h4TYx82HW#aU{RxvW#mFq=^+4m zK1T3fIOsv8$#jl0iixk~vhDEZvK?AZFmg=mlBSZ!ab^#%4ukm?^?!dyI9oCUYqPIbkixAf>yiAsv8DSod&BJ=^yw$A~2livv|3fXlG+!V(OSao7=oK`8p% z^2d3RKc9R16d*e0?pb_o5s)X7EwrPMpgHdg*O&o7(N|Rt%px$ zdh1NxSQgGPRM@6cKbndR10~8o!+$#SO1w)I3aIj=oeW2rEWCU9i#k*zpD8!_jcVu; z-pPKEqC*it?S_AuuUYOz6?kUIV&uG#6?WI>>b%VyW>f&jkS#CLkoM6-Xk1rYhtbG6 zn>Z(rZHWA{u#o{@(_)QFcHP!C(d$@tg(zn=6@%m4sAjUns*W%bv@ReUh^2}Pn&8&r zKPcka7rClFoh(j74j)B9sQUVEfRY!F){If>rQQ&7xb3+msry+zH*~y768%sp0q-fI zJgy(dZ#izp*am2L#PJRbflZ{K)2*t9b;!^1oT9$UoM|Lt?{hc{uxUk^0ThTFdwdM#R) zc)Ev;fc=883d9&a@Ap~aSLYwV&fA7)vwUD~Pvv-S!V=a2+ALk<_xErhi!R4Z(FKVG z$zte7LTm<6%En`O<5Nd%E0@SL8DKsiz|D`yraYqpd`#=57^d?sWu@G8MOtT7|yD&q*m|; zowmHjV6x5w0JIASX$PU>{E3}hz`DNq+f>BoBk-$rsktIYIA(@C>Shop?Hf{pKZ&<3 zC811-=@57_J%9?vN-1`z5`F6|d{r}An3hTOJ{N6r<(3>vcWt2{a%&`A4Hp6=Bjttv zMW)$mCW8pPc)8C#JorS*v9D>bk*NW2#BYOBLipZ4`;*F|W|8__Y*A(tjkA))1Fnmg z5wCD+KmrlhlEYLNxgoz9Rf_NvO_()Uj{PZm*M%8`t%e^PEe2^CF-6)aQtu&~j?y&& z1E=@(*ss}AnueOSTK}60DB`v8ycQTzsRGylM6~h9h`@KU-aLktUok)8uKju++W_#5 z(H1f@J}Gjlc49QcJYmPa=V!DYG!sBtp~ieCOL2G=b9_w9h7&jOz5O4MU2_@7E16$1 z`RWb!cYWh;C0%kaPchS3hFeS+)F@uz%8jXV_*hf_ZAcnY+#49nxMx&LH2OsYjSg&QAC`*@v_2IfY2`eQ8{T#^()R`Ex<1w7B`n89q^{JyH(?9YcKBjn8-8!qECUn4*Ou}|8l?J+TW8XqcNAa-QzxP`D3a<(C)kwZ+*W*rqO*|R zObYaUnf;n^DOL0FM@}Gt(bD}{RiuQ{xqFeT!j-c2Cz^KE-Yya>u4KrZ3J~Db42u_$ z4IB#u%_ag)qHIN&df^+u7yaxuZzT(WtL6?lUi3w}i6Ws!l})z4<|R4u9U5=Q6n z)wimxjkbUmk|_%RgQ+%udtV6d$33BILAWAsTvzuqq(0+(rj7DQ4U zB{nF3hEN>XK_9=2)4&<)Oz!tA{^9a8K$<31>rO9TJy3W*PP@zeKBI9jHxh5vIbzqr*JHKhd>22pcpDPp04nY89 zNiQ^2BGwbHOVt*2QuIoUCi&#m*-F!J`^POFk!BL+>%J{#eKWnt#l}z{YlmvoCZka9 zCWI>EIJl6=XoJj|%w-OfA)w$&i>%K=ScOx@nkm8fPW43B)m*%J0@-9qvRAw2n4^d> zJKmNJ+d39XLq-Q!&_#y^CaHOn$+Y(@_zu8X@2@NbpnAItzY6NyxjLP#d+2tt|4bbST4q?*q2W3?+J&p#k8 zcWn6qDjd*7N~>xas!0R%C8n5rE2?o-R2GXH^=iS~i`E8UhOroZyukX{p^K<@n9ZG( z=V#D991j`tX5cl^7cI{LXoLs4s`+1H?z1{(2v$-|e!=~Pi*Eto;yLnOe?YxQk*m@1 zJDmr-GB(0KC)dDs##>rpwZTU`nEu!2+DL3!D7;~n@^7m0nWL+Laj@r0BA$R#6{CwE zH?uWF)UPkyUMk5P1WN9wy#C5s5RT8U^c`Th+zfjnyeTL3BskA*NU 
zt1+A*u-URR`l4!Q(C9MSWdxW5(Q9R>)|~pwU2k16KYl0NBrIeBKCTP9R<_i*^G z==n#DLWu)K-JI);OM5w&G!!L3W>(^_{ky-aa7-xrdiA&U6N+{KNd3;!)VG+H%a#)W zl^I3Qq%zkK+3nDJKslFFRb->qczC*gWEc>j>iN@~Xqw87d(}&KpY*{TM>RYnz);ND zBwS;+R6qI(U)=*WL@y!^uKxwI6FB7#9!X4% zd1+u^f3-NkS{oLwZ9KmPHkqU%scy*Z;tW8nT(QXAtTvRrwJB!Fmgo&LFA5&az+NBw zhUqsiM7CbSPaNbq_Yw8xmx#o-%%$35Hc^2qpOQ3Jn$qQ70c96uzgj3#-3Nn`8zV7A z30-D?nrrf+oefmPpYJRjsVH&Y4I?_l{;prT(EluTlJQ#Z8A~EiTYrV{-KzHNS53ElDUDsq5V&7b_>{NfD#Q?_P(!JCM1ZI@QyT#Hzi@fZMEdq<=iAS8MI z88ca-(oks<0W@kf(IN-=%+$F@(nO~}YZjhwY8=g0SzQ$W*-U$Jn~=zG_agRk?=rC5#r#bM0D|)r z=rhGhK`%@Mf!KmiEW`-0;1pwE-~n0SjJSioX1(^4&NyuG!A!$3giHAih1LYKC&cU5jdR(zGV9tbBxmXOH; z3rlhCn_97lI|J(X%~$vbh!4-IHrN5tEsv=>fsQgLKd}m9Nfh1-eTSRiT27E-kq__Y z=!zA($i$+G#%pX&6>ZU5F=x=@lx9hs zuIlR_e;6-?nR7cAxf1~p{*8h2(}1_hyr{>yo!*rFGuJf(3At!?2J7BaEJ%1Etyr*t z(jr$br&4T{^}6a&E4vw_nJq#8D*->9|M~g7onhIR6NPH35}U^=)+iUtlN4nPpu`Dp z)z$v2kbB{&UXn_68dF$j#gQm@tZC^&-Pfy>pqzi7EO{W4DN!_ivYrag^8q$kaj$+% zVshozP7++L!mI&#cXn}r0V1F;4RRn^1HhA^+YYyHuRMEb)_u_n;1(P8pko21USn<4 zoWT?D&C$Y}hghg35?0X4DU`jJ`w&m&dT|2Y3c&8<#sSa{0_gJQiSN3J8{@Opy(>-K zvjEvvj(6rLyEey>4Pgc`(#6%&UeKW&UnRd^h>-|q9>DtYg~bX=gt z^-w}=>ujZ|aUr`DJ^Z%*3jZ!qD|28;+W6wg^@z2?@I_p$*vaV?Yn%uIE(*O^<6U`9 zjQ49FU}9*a#*JK9qVz_4;%>xY-q3GOL=f{kKlBver{6b)JG=lzD@t^X`sfHVsS0slj%^VL#vkz?k)1!UQ zVq8+C99qcdKrTriP@$*|Ptg|-u4tHhiLaayn(oRNBJmuH`5P@6)5v~{D}#}I`$_}% ziNpX3&Yw?CcF(l4fU^93hXw1M8D1JfJm5M!nQJPVRXZiRw@-*8YBCFl9`V#0j7xmS zY2z@q;O#4`6e66ef>=q8FW>(40ocI3tFlMHqI04n0=qNM#TZ`E#|=N#D0{Kx+&n7j z<>Udzlnf!LWgQr~)@?q+fATXIITq?E-vQHs^fT)E)uv&^`IQHZ|#znuhziQ8DkkVl_*1 zO)G!AHx5OP-y<6}0|m8X>=6ocOS%UsA+@ap(uTirzXt%)6!O3bv3ST1@T|y+CvOyr zOe43vM2T4y#St3nMt7fEo^FcLq1a?H$3MvP1Kd8wnb8WVB)*3 zH@)a2o&TUs4~eCnVb{M@x`q7~YY-{L$qkmA<$GX#-Ag?Lf6iD;&N=KWLPXBl^Cf&@ z@$pOk>ISb;MEiwm#Aj=|Cd^QFwb&rWx>JJu``DcEBdh&dgDUaa#k=6h)j5h|vWSF5 zA!ZGIpv>S6b?KT`WY^*lcxF))IK!x!ksw3rZM)2s{2H+lX|}C6xg6OQ(Dm`r`LQjh zAUOUZt88QA0v$Rn`OTs3y}!Q?%IFQRo87TdD=s46_^0j8Tvhx5Zu;S=-N(I0H=Sg9nuU0Ae4xp1$Hh@9$v72co zVo2F=Ly|RL!4*<~T!wF-tIWo8?hYCQIgvZ$C9{v>BZg zgOfHjHd)WyZxYF)HT76U?!0&(#%p66EQu{_M-&PT5y#edr45Z@0btZ^AJ0P0)u2#( zW`Ks!#0^7|s>}BvN?TTnG`}uZkyIxNXexHc>^h`?|DP@FeK$eKpYeA4=5UW_OD*71 z_~6jrdwNu*0;Ba$Bt*FJroRJ=yV%6s>jIa6~U^PhOKQ~0L6!J|IleS9WCCr{t!VPo!4Uf7fI-fm$E z)2qKNXTRmFUsBt-U!kZ&BjMx4+>nx#?uE9Oel>g$eV%Xl#eITD?K>|Q$%!*$*z3Bs zerLP=xb)59BXK^pwE`zvtHr=4hCJWDQXF1!Bn>h-1Xw(SM3d_R>Tyt#JfhW>5#tDp3*`~Caw=A)6x${&w* zv;<8L(zyHEvj@03itmlg1I}&RKgrE*xvFJZa%i8zrrH|5k1px?Zv~oxhYe4z4*ABK zbN<`LZ|2*7?~nSISGdRN(x=??E~C1fR_!RIDOO3GuCgagHhnV(rt>4RVJ9tb?@rHu zZkzM|@86`~8U=r+_`i9!zRY5gu9KvTo_2P`%7q?>-KFy8ecBkge4hX$bFdwjZP={n zP~myFkoUBSjij#rpZ|(x55m`(?tRF>z@S><8c~vxSdwa$T$Bo=7>o=IjdTr+bq!5J z3=OS}O|6U#wG9od3=9;!wmm}8kei>9nO2EgL!dZc98iNK$cEtjw370~qEv?R@^Zb* fyzJuS#DY}4{G#;P?`))iiWody{an^LB{Ts5Ji5gy literal 0 HcmV?d00001 diff --git a/images/Distributed-Volume.png b/images/Distributed-Volume.png new file mode 100644 index 0000000000000000000000000000000000000000..4386ca935b995eec51c2ce96d777147db68531d1 GIT binary patch literal 47211 zcmY(qbzD^6_dPsAH-aG23W9@_Ae|#A-6#z*AP-ecXvqV(1PG| zeSbf{KOT57z`Zl~?sNCqYwxr6I-%;S^27u*1RxNISmBL~CJ2Oq0D-`^csRh3U2lg@ z;1`a$lDrJ)@4xS?mclsT2)^qZ19uSU!QQ_g@W%;)BoGJ=QjmGA4Vynq$8MmVNxnZs zH0N{X9Uj#!9eJe++`4Fms1Q|d2&vNHypkULzc1y)u|e0LM3vHB=BV?#ZSnCjdIe@X zxsOs^1l{o^EDYEG=3|-pPpuyP2P?_mP8sQc^$7e z21HZDBydsYK@=~viu6tW%?v}CdJ1v63aP`nCm&{Vc^LRd(gn`Z(yEA2NJvC-QROGT z(esxJmGM{Hx$bzp5q{of^BqAZaPLNe)(d+T5Js2DAnTu?CdA5&)}tfKEQ^i0KAU6F zyqBAWZT>N!57Y^b8>h-35LR-F-a3*t|^Wc>IEbgKNCl(slczb zjnR977>l%wzcb0UdkP6&dIkEY1YLVX2@2m=UE2tp+F+sF#ZaQ$%LOBG4q*vwH?_Oa 
zV&sk*#i8!G{zy=jKF6c*Zu3f`vfHpnY&St{z{$~xFhD3|DAFRIi(Cxqf$3{@g`t>y%9k;O&<|M zEJo+zg{)LjqI3D+{{rQXtT+~NpJ4cB-SZFr%9$?k{^v{>oAK+V909mir9`8$Do+}qis0SQD|XDhTn-Iwt6-!R{2Vn|6|V0>oz)S6hqeP-1v7O~^^j*eJ&{;|BEKs~Lta@r|Cut^F<=x& zGfls}?1;TiyYBhIPMEV84g4=BgJ7u=mzY^XGYP1D-FV2q{Q4?&6FvOf9}oSzPKZPO z1!m8)z&Au^xESwv#k6tS&181MnOa9}{S{RD$lnmrGwL(*HeJ`8p!b?$2_Afk9>FWR zO(G+g!X~0JpwFZ(eej?#sx3#e0me@!_fh8Xx)gXPO_W3tAG-vsu{+Yr<+b1DbsRWiQtj(hYB_?FijrX2gCr^jaWb4&ZD8JXzoT3{QY$Lc~ z8|i*6L@O@kJKSD9U;c<4WZ@$e+XEr(C+eh6>?zrzA6MY}+dW-@Tl;4xqjzaWsYLFp zto@r5&~~q1Qgg9C*3L6x8M^L(bvb~nzQQy-#6Yb1H5(Jb?=*pUcvpF>jy9m-rOC81 z_pb}-=*lF;>H3BalI;pW&zWsh;Ji>qiue_AmD7tzpgvvGyL8c(4;rnyjiq=?A~h1Uz})KgXBkiPX%>Y><+NU+wj*#>_> z5}GT%#l45d7KI`G~=rhw>xaMy&Q*D!lkaaE34$C6gSxch@dVZRcv3kZU z33fR@O?IqZ;gr5S_j+Qf?^n801y$VyG9Z0-sgwrJ`BkqI6Ns%TwC>6X`mpW&U!-9U zPCuljabu%)XnAqot-xEr&uM}2n1kD{9wj=o08DOb{0EuC=<+5L)uz7>1@6Ct6gN}1 zFZ>(+A2#UI+_1yl@-az10YLFDz&BM@K?=M2c4NbWY~L`4y4Tb}ze74|~$*Y=kNblhQ! z@)0Ai7y`$buX?r#S=5jd!)@_bg|&ABwt<3lF8AA0Et)KtKRlNgy9=+zL-=3WE$)1n zEYPv|ZdgkoezoY%fr~$rr&1!v=cUa!|HD_rSSZoCH&xWD;t&tb19LLv=*^0B*dB$w z|I5FA8~ty5_$7KJt;$Ys1<5vk-{?$G;w(tpPgnQvBN24)vMS9%RbvIueeLQytm=qj z=kz^9YNaYXuhwMY#fTFjvhIv`MoWCNWi~*<;2-o>Nxysf<8hBXdl?NgDF5}bqt2ht z)qZyKc|XtpoNYQuCYSDfauBrWe;KV;V+FN~oDqP+dM4NW|NeWUA1H^Q7hfEo zYO-j}{(MOtJ|jS~c=WKBS03KaZayy>Az0sutOLh+WeWZW^l5p!S|Ns8#^r|6$vF>M zr7hWX&~M7ztRe3|IV7i6CM>Sk4qY++=f|HNvq_1S!{pqjn)jq#N41gNnMiA1^e2b! zCbP8muOgOA7XHtSa7x3+%p;j|HceOFYyqo9rn4HBSAFIN%tmjkkEvS!{x?g)DoP)Z zsNgC(YSrJ$RF;kjZO+dVNBqAFr1luT%t$3c6rI^zh5q-+=Af8k$^T3?kE7gZ+=ceZ z=04PGsPn>PUJ3j!X?3%*_qY>hMGTe9AH{W5>Bb!4Hn-FrN=R~e?|!B-QJ2s!8F#2? zVLiXuuORHpH1#{w@x9QAesFaA9i}t*!!1o^B3kCG%Q22DSkMrc`BFUf+TxmJp=w zUu}`-%>r%)Mx9+1;o2BF?I7yr|FvU_X4L-bx0$e#&UfwX6n+PS9ZW0?I`6}IS(}lJ z{Er_oMBKNC(#3pYqZlPsaY^X1?ryKNoSnHgH#f&J#QkhnS_7CE)Elh^9?dtrXD%~t zPS7ngWT|mo!!Z2uE(kQlrd6PxDG^X48~#YU#n&@t2&;?N2*8QwiWoWYu9qgudkwDW z{9oKV7h5E>6}|sw;JqJU-rI`!oMl+^2s zL#cwhHy8UJoBfn8UU22)LmO@3*pINa^OiZw`uzc3k)RNJ4+n+8Yc;ig2 zwM;AE_~1#S$F8PfgKKuLGM8b^8_VZ~YMH7VsOZ8m3%Q9Rja=L7lQlk%9d#NZ+b0)? z3wm@9tMV#ZFJA<+gm=4rDmR`4Bf7KUylS+N3z+*ELXeP`kHslCv3_a$i&gZGpaNi4 znJeA|lS*X0t#=Nqm?<}Y2n}_7#vA7rM*G0$`ot{z;J{H)N$KIs&qaZVe-a-|x{-5>Gm-YTA=`ZU?iK7_482X<2!5F>#2pKHS}CjAV!x z1J|Eybhp_X%@&{fZXEtX??X&shXF@I898)!YU*t6=|hnet>x`bU!TIgPko~XiVUL< zP1hbB?p@3au(axnE+3tBhPBb~+dnHv1xICmxM5prcrU%PH&x>C`_$?M7uO>weo`03 z6%Y=)quJ8tVYJZX*eW&nujLkouU&F`y}3oRwYF zQusny)iRVVdLpu8nB~Jtp9%ba*Qt8E+)^wb%@EcUJ|uZKUz+J2+yy1Y^ zp;W9Dw*?KA*5uI8h+n%Ch21^0%I_J)VCtpGUA}z>2OKdmT{v`&Cgkd&#DZ=u&!FU5 z3rSl&1q|)%UcQ*Dl_qZMU7^7699V%EgjB`C`;L~HwGz3EiyUUknfwTny7Wp*9B0Zo zYwf2LE!jERZROh4NZ8MEBigJ;0#Ose!MOun-nkzg>1dLPLgF4XJ2tE$CFD8n*ZcK{|% zu}O<>PI>;)b{t?>;vhp{907z~heNHR?G!`D2ko;E-Cb5i`*;a}_(3h;23@L$(GoXr zK$}w~dRXchOqFr86AQC&%%o2*yEz4yEv>D?*qlq#vBYo^u;!6IaUg|*^#qn6P+Zb_ z&kX)U*6V_XO!+53Pij$Q! 
zx5z5}(qCkl`6Th2Z$p;-a(O+S?*&w-XW?~&j?yT>i>y>?%w65BC)v5QO%p{Ko-i}y zkl;I}@PG2O|72$384P{nV5K_3>CM@d0ArDOy?KU(9&P`cfTUwH%1c|v(Pm;V=E8qg*nqz4)nJy@)hti!q7a-`Hg8%;%syC-oUeD;4(gpQ z!+PU&F#8iX?g5nCK+&JFmfb;ngFha~=+5R@MCzMV{S&vICi&UM4rAl*hae6HHceF( zZKQNA)+FZ;W_?CWqAX&qxAPw~mYy{SjoDo>vfoR|J#csiMlgQ3&emtg?~8@@s%8kg z&*Og6=f?l4tfE4>&&)H#SrsQ*7k*snrK#$T$qM+F<(=E*!JK&u;Bau<0|r{j4{5Vg zksSG017J*Cyim3yFQ*o*`HYA>gw36$1EaE{PeJQjbpTprpL#WqaEEs*l5~S1>ARc8 zb;{J=?{%&AmiLTPBpRMNr7k6?I_>Ua$}@^te}bdV!B3~8eN>vcOW#uLet5Z%x?qd^ zhhQz=Q~=jO@xLNHc5_S>p7>`bChn)M%L$NcExNY+^i^e(j>axfn zvR2}WBz^m)T&vH#a{Z}83z9gVxN3U9Pd`%tQEP_5NqsA^S-vARqlvpSEE0l&qP4r@ z_q0|b^(~HV*?%z*&neBnQm!m+*sAghXS$I7)!VV7#xgrpX~JaP*nZs#Of&l!H^5|} z6_&od_!B)Rb+DP9_Cl{bZ8n`a2}{V_p4Nd4E&fjP5l3S4ze#LBjkeO5sUzD1s&EM;z_SKrGa)C65Dtvc<3d+e>_S7m14{TdJVTQMlb$P9v=}o**3^+G8{E(qWSrUws%MK9^sFjQpF1>take{hNHFO%#FMF@22DA! zxEBl6=VlwgC|1JgV{`r^_I_Lj@a%PfefW@Ikk+F9 z+t8KQ)>S@d)=aPP!4Pm7cWW;ca8_v|mLde9m5v$^UEvncMk-8)@0uiAw9HmG5c+!f za4!i9aPO5`KwOa-nmG<^0ZKI$+_gnlDZ5mqn%!=bHC{S2SBN&4(5g~*pC zk3*^W#}ES!UAkYmp-;=Gsz1Kt)kYRWLxWZ#1cmKj-q;YWor%s@=KUh}9UHChjF)EQ zcdkRqCyT7lM|Cgq2_~Co6BU0 zRm95u7ffa9@nEZvAkXQFY!cMhySyx5_w$|THg2(1 z9x*h3sNP`VU$RFyS=mR+SFA=qFO*MBZ#px_=lW^bE*Rhs9~@;b;39LIM14^x%;*__32+OhPRq#NxoUaU}f zzf*VuVreDiz<)OL?P+t*4IU&bnpx3jpA2|=H`W(%V34l5;_r& zuZP=-Kmz+R3C9!d{;q#drfFTIy}H270<&g7{c- zX`1%sVvg~~^5UM|!5cI+c&?Abocs6}g(8+2aXtq=)}iDN)6#gvKFcPB;GT}TuixhP zc=K1oO&9wzEeh=b7+}rNmL_(MFv}&BCL**J0f0zbmrJN}PaIBp8Tz>47t!OLCGjKQ%R1oe@6%Z;c@H>r-@uRZ=^U7Fh2_fWzzJUcvCF&O@69b|NPQR>?ecsFd z@eIZTIewqtD))(mV4|KJp7y!~M-Z>>OgxGXtMee6=gDm>6MUB!$UAtsB|ziijcaAV z4Pv2t|8goa7MgUqTxrql^3NWVdgpWqVU7Q9x2Nqa1@$&V8To&Z$_GaY`w(zauaS8t zaXZONAamPg7i9oxy*1$WD~b&BY4BXG4XK*J6Ktl+N1B!_sScfagKwCWfV95#@Padj zrS|sEzhpidpY!bGG-P5ZxFq>}Rr7nXfqebXT(wjs4`;3CzKvA@4y8FQ+^URA3XRmX z$}KaGA#qwD#fJ5FUj*Orii@jQvH;*anMedo{-vd)?1;?6XVRxKJ8h%a3 zUg}rYHQsv*&CSh$l6wmk+!q>9@_jb>D7x3_BA(|IEMEf52VyT`L9+gb^JNZik`T!X zmSSc@U)#Q@ye2{b8Qg@|?~ms*btysB!#sJx(;Y*_zn?}#?1L=UwuV{eCzA;T5`d2$ zsLU+FWecxHqsrJ3ul-**E&o6S#Zp#3Q_pdHP|&L3K>lwc)YqtMCt_h(ng`?=aTn{5 z<3*PgHef1Y6c;vtlMI_1tmMc$CL12T`CixGjF+_@CR1Bp<%zrvCoyK|vZ| zudc$A3m|7t~G8>NPY}1o4|d+Z^!rKhQQ%noRiQ$@EUMBko`{^!SCYjE>`?@^Ar68kEirZYwzx=$>raJ6m4KW zYLAz{Fsz73WzxMpFoaQ1%TFQr-&HYWAwQ~`%IQO zn0?niQ_1kS^Q(7M0~Lmb6og$goR{}iSBj#+54wrvJvQeu0QQmP#?99p6rDexfylMi zdnH0J6oiL}g}NQqyg}I+#=nAP%anknRl!3PYY_Q9#MHOAie;N55La~RDMGVDj>Xar z9351tA08HhajI*gX2e)CB5|wi@4Uw2SRf^B2WdVQk%+3+&w#OrTCmE=(T8)U=>U-$WDvp z={%zh?ziyRKEOc*=bBDy@LL6_c5L7zd!EBL<}OYh{6sk6V7^FtL^b@pL2N zuZ|`DPAboJ>#`q~%#ZfzWK>)%m&xduK-k+)y9wY*o08jZ3=JF(Pqpor2)9QL@>`m9 zCn9zyq&h60sNeXwXO2woZkD~ErO2_)U!q4?m6`aXWnV~`(t!}aC9-7jpC0__iE^3D z>bg3k8l(a}FKnn)#CUr9&^b)hwdEPZ@lv7ptnuPCLpzW8FV{^M(%k#I+t5KsJIS0T zK4L?MTJeu#{6)^EuJ%hbiZPrcTjbFwahr#u{rK$-h_lqLJIbe}EjZ#Nl2)+mA~xCp zj409}7*Kg-!cMNfUQM-4LOi2$M|Z?ugNkOHpmjlJ?;rE1SCwi4Ee1T3<^Jo^I#a%- zfwv#?46S{9r1?u)q@yLpUVD!eVg==Dt!g+T@RO=8gEo9euB7(GwWeB2Hw;OkObak; zr?(>c@QuDH6MdsYaL)$&TSEG`V_{L8dNrk#^aj)8C0mmfoBQW-@owk#n{MZB`BmBm z-~p=s#O!TKGa4hQ{5ipQt1aIW8)LSH(kUN3!wec9NquWvBo>?Kt#0Oid8#V+)0fOy zm>dEo41$IdJt$~%cXzitY^tpZKzV^*C?#-_&GiCx30!6OXBl>^;XX?x&+`YYXv1EA z^CnmlMhrls<^7b#A%seMQ5cLvv$WNGEGEl?cCsPd%!bjnR>RAOuOG< z?Mzpt^ctMJgO`wRF>w!7nL$M?kZo9DT4~;$g&M+ARGz`jxCMB)@Zwun5*3&#H>i4% zInd?k;K7EAAs+X!qf=HykFN$HWT?vIe6f9VckIja-Jg-tmmhOoygjK#3X)Xl#c8bs zU;@3p{88i#;HX}a44rn3(g34{pM=qeJA)}+8KoP0br?^g!%K5&DF;>< z)@lO$nJgc$ssg`v-x_@MEi{6)Q?mEK?1m0Qh=~wie}As}i+(2C zYw~0T&Md4qQM7`({@wFx2iJ9bNQEph_pOLM_e-CDWKMK$d7-}%yAmF($?GtM)cK`K zqr^QRL|RMb@b_Ysuf3eSY`FiuSZfBrExs8d+FIq+|*(3aoT(PwPM 
zMMm<;DF^cy1NqKzoZpX!qX%u77*J1NvsQ6GBCnWI(^OG+VK}s!f0!?R zZ)e^vKaZj}xKWe5o4i_fb@{%6yDVCsD<)a*vhoNuOp79yp5Z#u>wC7}Jqk^Brd5~9 zG?`b6mXuJU^1ckaMR*38PLGvbuxL%ID@>22zSs7!FY|6;u8MLHTZNg&YQo1- zJ;j<9<9x)v!#CDgICoAou^FfYABr^eVn*I>3+DUF`kbXe>}49yXta*fRpLUt3XDrwHOB7wJ&rphnv8!0s{-wFdzTjTy-Dx2zNH$g4c<*@ zBgr>av3k1xc>Nt)Mbe=a@LpFaezWZECg%WtpZOM?Oj(%Qph_Qc^pj`6&u_g8-<(PS z$A33#A`lw5H`Ppau`_1eZrbeSRBVAF=@3R?#)@~dG7??GRlo@5FJG_~{}5X~w;4=( zy2_gY#b47OZM|M6WQ1U0un3HRgnk_t78ZsVyIHQEhz*}DHG2nW?EnlkHat4n~yx{$d>ScN1(xXxkdR3Gg$YPey`~I50-?BXn9s zCyn|at|M%KC`JXJF^>ofG|mxAb5sYpwHZK@bMWKbjPovU6e8JN;*iLEu5{&WR3^pf zW=k+kcL+&af<_d}4EC3LS*a1zsKpMKnhqK~Xw-iZukR9?vwbL9LuK%CGK~ly zLB@*!>9rruR$6eBNmCO*(D#;c%}tm=j}l|x(1BvYu%(_%$6-`iUh>eXOZ@=LN(-}J z__T|&TR1Nx*%&ahEILU9&~IE5j|YgdWTZocPJEx-er+ph#dYOoCb@b=13E9OUp~EU z21uw{`VjpsjQr0g%ezfTB&ON;0UIiJE_DZ8DC@!LZgikEor*vSdZCYg9PMDrACz+G zn^tMy7|1K@WL_Npxa`Os6!9%A0Z>*f)hcRW*PdI9!Pqd?Q!K)6U-meBa084@6XUr^ zYMVAA6V%g#0k}m#yVd25LG_+iT~2B8h27uSX%DvzXwx@=NC}bFjMfIk(>*tWrP&II zDa8jWkdIF$M)bmt#+a|KhqJJ_NQ?fMRQ8uJYZ8|sXrH1`R#z6I9WhHraJ2WctgiEV zjjT`2B-6@rSp6~CLF7+_KXky>kQSb1?&46kfmJN2$E##>!uao8S-xxJhOF%Gfcx7$ zVL%T0`?41WTDFCACd)=rLNp8sh%18KF|*&a4o;dmJ?)q83*oOit$sO<5d6eKYRl+< z5_C=7+&uQ91}tgeROGl9QIuwqc>I$i3xP5%RHa|B(3cNKeFK;-^)iX=-h=+L;14WM zA3m`cd{C*3(NE|Tf<#s{_6@wXUrES6zrOToijmX`G)Aeg7|64cig}$LRy~@SUQUVP zJ=>Xkzck$3IZqf}G7m9bfEARrP|M>-%ggpu0;R&d6y7RK-;#cXN^6>FzX7B*O&}D; z=0GZDEnQ6mIhlpRnOh|rkPp+Bc*3~0hYJ_By~aEDad**Wx#!ngWU^m&$ z45MP08PBaAps+Rig^Bu7NHyB2HR>O~HHI8JRBmP!+Z=KuP{$DBGHq1`7G#f7m1^MR zET2I!F+pyXXrj#@*&;!je*Ls8?m%NL7ry(htz!#~qe$VGxcZ>ngR~uIm5a7OiN~sh z3IYxeLQ#-F-m7d{2n7a#^05&B6F*9f=%rJH_EcgM1md#fTWYyj|K7=sX2M|c3f`d0 zo~kfi@kWv#fPPi2F~qh#&wg}OPL?IJB}k{P3lz9`V(%FT+Gz0*4M3W&1(MEQ)grrv z$+8{U2L0PvI>T1i6buB_ZYT4ARObdOw^)D9Dy>pM86k3?0UTk2Q59v= z9kiHM-rR0f+1c4!)^6}-GL}T9NHl+Y9qpW+gyu}EdilcVCtY+j6NRC6GhGoX!n*zc5imdrS2}5i{USP3 zhzn=!U6v6UH6-ydt|Ct6kRY{ldtEwx-+g49aO&RUd( zz8~A`fADgRi%ZNexp|A&CKMgmOi@^T*gB$r+(R#{Q;v;jKw-QlyRs3=Ld@CltM^}F zVPmiy$?~OXHQ$O2&?JlBi_k!xM^Q^f9~%`!tGRH2P$=N_)Z?`nz)w=4nyi8DWNta!)a&j57l57^`7JU&$k`|iD00Gi)j zS3JgN&@>OeTkz9E!s5}ANMrnwu_$CZSmORVZPxbudgE1?D*FlSBZKkC!-?tW5tnRv z>=H_tcF@MI`l`?fBVEhM^+}gwh4onHM)gimPDt(Gt(s=D zyFJ@~%lGI>u^zqdvIVnLrB!9a1E;?0bgMeQ3evn2{8b6nS1Z03Pw-qkf z(`hnJ+ro_*&rr8y6vVP&=(bL-J@h|;5Pr-&w|)}M?Q%G0)}nn><$Tm9!o_B=zcUA! z=)#OXdnP7uqHHj7S?WHhjbJG#6`j~U%-xkb(!6PS63^fdGgO|Eysw!pVZvr5l*Kt6 zyD;)4f^jS>M-OvF3mKOY(y0NJ^-pxN$xs$9N~|e{-$b9+&<8W;$M1~v5+X_gn=DJ& zl6P5ymv5yTT~+XgPDqoA~hfr`@(r!sr%at zbY{gKK)|B5NsmyCiNNDLI`uwbm{M_e41EOA$YK3a3?|uT|It?^XP>3*k`tqnz$nGW zbm8+&v?E4?$aDDtvv-X=hGKh3Q*mM4)5+d0AAH9K7b{jRLI@bWN3MG6-)HCf3Qit7 zfbr`#LF+EYKC)^tI}f8@fHQS$W29ni$>EDq*e9hDsbe`Bps3jx+~v=Re3c{AP=ubH zZ(gp<*Wbr;6>I3y%Ge(5h>ftg#~2Zjubh5g?wvb$fRk|%^-3I+EMACDZ?cd&e3-7N zM_N|6ka)T~R!FsjPG@I|ySW}_k)@)9iU=5R2=zgR{=dhKfl$jFSjIb13 zO`D)yk{9M=sf@n4X83%_obC-`Gp~HDZ0$bP!Aec{(1Z~bJtVa!gh0p45WBK5R_Ik^ zG|XruQlg?Qdc;2omCnf!-IIN5W1kUQFY#ASk57nRLk^%^Kdf?mmI!GJKC|c&h~0E# zp;VtP){zGH)1I$xFqwkUud3cP=sz@A7jjp`jzI-H*j=(ChMkvtcZtYrq~M-F`p@_% z_UV9%2bqkbzJPeYRuXBogHYNG7LuwS<}?echFYL^n&dN$&p>%_6rHdCdqMFRM)IU1 z9K6V34ZkMe0=en;O*mqzI(7u&LKrHK_GYqm7hT$!Z|lcnyWIh|6%b-BW=rE@!g<+$ zak)5-!$nD|5UKT!1!W6$Vf6i}0#4*m3MF9S*#jd2y5P@v-@|2*!>d1oWy%ySpLI@U zcNr>Dyh((HR#a#Q%w7Ipn+l-ET11hx8$Yt? 
z;vT|S6!LQi_}eWN4D6PH)hpLpnp-HQCf=WH)Wg3|v<))Z(-z9dPcLUFS`3j2Pe+1;=VXJ}R( z5syi3irN~?AbSLvLiP9$x3;nh!S1&jNc^qhgSluulEK0NPy(NO z^_}OMWGAU*dI+hR;HAm#QaRWTqbyp@gjmQ-tFk^HfkI3!tI#?6q`ISqK;NfK=gg&WW9}~9=ATrV<6s?ccxxi%5&>d#IonrfqD7xn z(_-uq)L)!AmcP~|7YNx$J_=!92=k0_%@EFk|N7ZI)O~~c^E;9f=D5Q~AfZ(5?Wp`^ z=DeWVuW({2_wCMQyv6y>8ENRz%H^I`4!4BHIS<(?diuSuKYtuU=87We2v@yhx<%>g z&KOoxARf)}M5xN0avNBBnY6zp*ia`(V&Q+>wC#LN@dxN`sNNjpkyS|B3Ig_cytC`x zJ_*(=3K{UFq@)VneGOtMSmW&ku`s-?bF`b~M_k+BdO|dc{If7Z0&c-Lw}gyGuY+;% zz0-~eo|$8+h9~?mb5t!etfhE4SKyTT!vP|m`d)5tSbaEMFwvyQ@<=iW&^zVlJ0xBb zhB9rMLCH263vu$l9GJ;k=SL zta*iLqy;r++x*#S!jTj($2UIVhFY(0jGErR_$<+;BNNT&uX=t`E`QB{0bOo{M!PWN zqV&dhG|+OsFzfe5Gr~9RJRk<8_V=3luE;=Jx+O`xNF6((vd1gq=lf(Y9EvuF0vca- zQn}yksP+%dK9FdIF#p;wF(Vgzl}GtsbEKj~ui|TZmqga!ARZw?i5Yl8$V4vwRv=&& zf0#sIA(#QR-=I3v+pwh-uT0ZN;qi1$@V2o#>(_+h0}HU6CK4p)h4I2{jtXLv&oTmP6tiH=`$ZCgi?Wl5$4EWDS7ZwN^OLj%#Mm|pXRwa!Y0;dm68-CT_R zIhJiaGK7DPuqyiN_0e3`->df;nsg3H4dTWBeDLe{+4g5{AQNqBc3|XVgzJK>-qJ&< z*`z~NM3eS<_{0iE4kZAw8oThbAk7`~)~)OC_XdQJ3gbav(q;YYru2MOE(@f zis5|Bm1l0Dd2Ra3PkW;*QMrxbY0&tSCn=Z+VIqVA#6SSg6R&cgkC|DVL!E036ETng z$bj3Qw)Vtkckw8v@oCVF{8*Fb zY$fW#JYZuA`I&*y>EM^ioLp(L7_tVNUjMvxwk%z|E7qXZ}`lN z>fhTn*|W7?XixJ{D2EJdrnIr)8n(j9L5lj6z8qu=Ups#Fs&@Rup>tUj;IN*$m|IlpUC8qYS~mFUR_G8zpwq#Q z$+TjS!ok+1s^^G3bobT%L}zp6194wywE8%TWuFcR#ynM5x;@+|2I5l)E)9gw&ow~F zxxM$VW!`VEKk}L$=v}jo!=o&YeM&rpon3K`?fF{gmmNT%_Y>L((~!wBdPJ!nHC8(y z`T(NgY@2}*dq?ptP0XsQ_Z_b~&|HKOR)5XNkBmuH8{~Lo^(oTZ!_2+NG&2JOA}LnA zpk<>OJsOx+_V(YCP906R@g-d*Ge6zp`L|H*g8GYi5f&F<7C6~U zeA_P8G=TCBZHkA>JNCRzR|HxJsOb8;|JtA5<~Axef~r(X63n%RpLZt%QVBxYN-`Ub zS25{>$xCoviY$&dnQ9Q+^PR1+Q7az~{cV_Jh-cJI`+N1ruIKF=EgVG^qsG}1=l1ag zAPL`H(=57Vw1 zfr19Jw%KMi7qs^;8bwm|1WO3o?|%rUD3VpG)tmsK_1GQjst4;A2K;6Z!KAgl83@C{l zZ+}}=>i1;}f%4sv$fwbF>ncrGX4(3pY+*^<_HsUFsp^-GYQWEH_|+t`HLgw5SRn;!30cJ>9v12ppFT-O|jgyv0)?^Pa`u_*iKY=&GU zgThcs8@HD)b1=0V@#fB`Rq)nG<}qf!^-#*(2NOU5bJ`1N9p1Sdxnef!CO>6gJ}+dX z7!O`Z-0kJ%R_F3JZCL-hv^23dzUGO!i*AYDS;PHeR6ES$F|C0eMLoX?SsTmcI`{dm zZDX(XmvX?l{o7Tr?DFCV^mju9ptV~3uAcqTwU-Rq=&h!omDcv|Lzjy1Kq77CX%7y~ zA3A5P;ghw_PHTubR9I6vav?w!u*hxqCLdGE7m3ukUev1|FEd$QSD~WMkCi}# z5y`*E!~c^|K2s+s;uyx~xi9$rGY`Su0;GJKvmG8dAWE=Py`|#IH>z+E6!=A9HK+wY z#Ss^(XYKKf3f`z2uEe5i-p4`>zh1reQN)eDe0|trFCT9Tv6&-&yw{evQ$}$|DIy^+ z4Lqp>``I7a`VXl|AlpMTKziiFRD^kc!BFhd4XFM8KaJ@ml)B%StS=LqL}WF!1Iyyb zE!xq?5x+m60osCw2Pco9yGv{7<)F>YUw9GLd7lOjVWMQZP z82!WTC_s)=@h9jMZUEgi;y&iJ%E*274iA|>ojk(FkWsY#iI^t7&r zF^D!TRSj!M3f0YEp++w2vPUkVWjgk<+q(R<=Qu?1W%i956XB<5ed@ztjvLVdP1TTlPUHYEzq^8ZU!JWuu;1RpTe{zO+VsVl`B zj@mREsvj--CM?4NIW*1HmTqUB2GBb+=XOa*F!4L&*U+YWzy^17w#X(7l!amLpt*Kstch8Q4ku|?{d;)-4a z*YJ|&*-RIy6MiH1cKT^(}?>BQ|-y+5UlrKgYH);ReSE4lnj zp-KDy!veRcZ`hyWhW=^hfsG3VHjuTC4@n>z)#T`@?=3dBjqfV;OQFev8xlq^5T8E+QO0{gYCy@o{st8Xin{u$9Pc; zu=66er8zJ^nIZ2Ey^;$Q*dhYJeQDQ&X#;>pjTG@ZfK2n`WHm^tm%k)I#68zgqEz?V z9sdj{zMm3QSl~7-9VY{&MMvKOdIk+O&;|Dgq*9G-s1fXT%^$VKzV7Js^EK`_o;4J^ zTJ3PAofh``YM-9V<+?~oK&P~d9**i^aB}%I(N$j2xQF9$bB9t?USpg}H1Sn{T5t43 z%371He&t}s@}|RE!F&w<1`&&z1q2}KfD*3Tu-AJz;ERdRCgpei%ACXFP{57|2wRZb z?cN+f_qltx1&qYfAgaZnJ}@0_^y1ye+H!+!eT=9f{&pUrec%s!m?Ol)k9M-Xr+6yG&iYVsHGA)3exViOSd$Q{?X6;=g@CWR$pB9w~`e?lt~LFl-vNv0aq_ z+rJ$J2*H_+*f@bQlJq5hU7w?8OOg`L`#qz8?#a;G?IK1xTHD}UDh3yq;#^Iqrj|y5 zsSnj3%LJbQq5IFoO@HB$f#zQ35+o9gAS{Rkc;$?&>eQQwdu$bm9=Kl_jPNXmSbaZT ze1EfkLP0nqT*v2`o|{y@ToB`Vj*0R$`-#5XGL*_Bn95TVe9gbJsW?4dPv|2lv$0v$ z`JW<~9`SF76^CF^*MO*AVWKze_!v+**gaLyA&!$Ms%amfg$f7GyZZX?L(-Q-pq3DE zF5!z$hwI&?!t0IPm(g`T=@t){0BU4af8~MDG6@Jpu2ZNcC^fY+EwVol_vvO^ia5^N z*PS-9zB%~64Q`t_yH}?YtH<9TGD!&RcZ0HOI(#cjF%aR_5qT&&L32yueOQRhnM2g21=9o+x410`ys 
zDjkzeQ5P<+7rL#^+RM35ch~;j9pYMggpIO);p(60%(i;{bFY54=X?F`O@IB)e3;X5u(-{4jz(x`R?qjBZ4bK)N&k}K#k+3(P zcvEF2K|N{i-I+KvnJ1Sa9|HZ|R~)%6T$&mYkZ!TjQ0jgjlq)xlX2xb<5n3h!MYD$6_^P(B7L+Q+#66VxOL&KeaVKY66~|8(dFCY=}Vj&HN%z8%Kwc#9V2Ra*IsQJ z%?Xay`=^em!iY+wa79Ae(fo`;kA^E0x3^lPY_?mKZML+tC#p+Eiw(Fn&i1CAdxJVS z=UdisfqZ{i}GZOQ)^L5?i(iS=0hDoQ;Qxef-}HGX+BsOF#( z^s-?7BXMM`94YsK+YnH??b>70QNlQq{wk_ZYY<1FfI9(_ff%;y|Z{!^ro3?r=^Qa1T1zC&5%u>!Y zN#wN_P-jpp#Fb5@s~<+Yef5m`Otbp6bR#sGKLEW5z^7{mBXsGe>X1|2IsIGFJyxw?`?e@z|CDof6 z5ky;2-nA~xaKzHf(WkFUx z*bwguP59}lgh+jUn`>WR#%A$rA4^YC^z~gFVXpqRzWA1BlzG$qs)&5qu07l$ihI8Z z4hT93#0nx|QC=h;q8@qrB*+N_CWuV?CfK)KapnVxrMkV^^G_mF=^tt2T zA7+&Y4~_!PgJ1#Eu7MUT)$3y`TvR;+zC7z(F8gIvl>^QGbG$V!Lx334jx>FFR11{P zVgJE;;2rv!O1`k!JUv%ZD8Z5*9L3g|ksIHVk&j>YH`;ePFoCA+jzg#Hyi^F+>t`-m z7RB_}?(fFS3HsbyG7D2t*TM0NEBYM!w{Op%u|(^B!g^LYwGxkK@&b(TS-?749zUCW%|QSB%o3E-%!Vv zwaDh1#I$Y7ti=|$2#hFEEcGpb)^U>ySc`QRx4{8a59tyEG>(DJ^13WBCu75 z9nZc{rY|)cTPUvbTokS&t~fMK=xyXuJaEJ?PXz(PrVnj1bkfjnQvUsp;%~r%`i_0f zBl8ry6ADeOUExu!U1rRqwi#65Z_Ju}H+=u95mbA2(HL@)xhipvK5uNu9nz0z>p=4f z)}u{tQfj#M@)pxfiW{P0-9Zw&|n@Z4)@1Xw<+hC=BlF#+_%Z6Odl5=qo zPtWA%wJN~EgB1+gh`sIJCNoi2l#n4ukN1yr-sF<R^aeU0ucMxb)TgZzS&n z+o$28Uh!zbJ=Kh8u6FUXtn9*^HPsz>b+PQUu~qM7@A%Moj{#trh<;~=mLI>@QGaJd z0@nCE2cJ14dyx8)>jnh@D6R%N;;jl9SAFua7bxbCS4M5;qDBeKbL7{-bL7~ZEQhW! zMP2dqsKYVMM8?7e4$Z3&xy8I^-7pwxQup!sO15;}aOCito@91^nu`_1D^8B6LzA}s z%gZrV71T+tvphs&&($Uol0cwBDF#}(6cYLA$r_;lQ)SE{S58(S+oSi*UBj&=SAE)f znIr$NGZi>zjygFGtyc_hIQtA%?${(aW#n{^<0IEo7)&?hom|fG4JH@v zt}xZS(xu$6){lb6IYnQ&J~FW)S4pGBU5JqM>^}85C*uDbzR=jyel*Y#{#BtFwIyG& z%B8z)HxZ}F zSfZOeljBsZ59XpTL~sX$qR*xkP*~*Qkn+l7l)ui0ois&l$fh9Y9Uqe-S(&<8(=aoT z)N9#0^FhH{mwm7063U>Sic#a2_7mOck~Zd^;DuU_?Cr_dL5XUQuf7H?eb6rC+`HTg zu5Q%Kx{uu7=!xqYO&`Z^VswScZKHD;g_OEYvgBcKL@?wsew(MD%6o59JU+qRdu#@b zj5^`CIvEYw3^d&V-Te03x0x9Yx(B{f_!7kgyG+i#VCu`e?un;gMVx}JyO0+hhV|QH4^1x^N5eJci~iJD z2jeam1|qxHXo3*4XYuqsqY}B~ksFSsC97j{gM-Hvh;A5qbyJ<}8~39Qg^YAGNuDR_ z0+ENh-kq;+qiJWqiuknxeT7ED!?84$Y_T+$x3=Qo{CxV$ad-3wLReWU%+~w!EH7G` z^wkK4V1XBxThBmi3h{Jb;i?+uxL@vwkTPte7_XBz_8bS%A;?u_gv7MzRf6^2ETpGOJ@7?dQDP>`{?(3v+inS z@+h@#>>mehFSDj$UkEsJ6(egt`C4kwmX~5#0QH|bnE`Lgj3#DE4eAN?*6O#YQc4`E zvF$y9qO`5w)e3v}- zbJ!v}_vZDxw?B@8++61=@xXTJgwpcCzx-s|Q?HlPs?(_X$P<@l+l;TX%CcV0k%~KU zVm2!#^Z_wBcx`)NQ0RW(yGlO{1}J@H6cj44%cY_OhXUeLOn>zf_7v(JLUIj+ zUf{4{8v4t*tG2?;H*)nW96d^1j5Q&_N9|-GOWY-iTO;SkZ$oujQ_1u6n=NPqnsp2U zDm)|u*4PqFKxHy-D&D{KZ^oig{zOF@%!y z5>);+0*xU%p01E(9w+>CfS#)Aew22znVHk)0y`%mCg+rw^okY(?WnfF`l9Nb^tYlv zwLfGIdK8ixcWixrG?F2_dd70;8;n%~aGP-yTbZr2>q3RVm6Zmp7D+3JXT=|#te4eM zL#Evbte@7&2D&Yp8B|}wxr;}qs`$fd%)0NwP@m_h6&#ikW`nt5LV_sz;u>pQPMk+m zqs%UouVkW}7=@Ob;6I6#WQrGG9y7Dt1M5SYlXR_Fkp+R2l;@wYVCfQYM4tbJJFpYhv}xK%M@RZxj4&ZtzOoI85&D|Tc<>2|q&O{rTha@`U&)1J^Y5;u_2aKme zY!z0O7*Al=?_)OB63S(5M#5kvFB>4TeWvNPza68CZQbO-`XW?^>0$BpWTGWsN~nP~ z)?~owZoQ3!9L(?fC?6Ji_Z7em=NssIa=&!u*K0v7c0Z8rp{3J^PE}D$i&KW5#yLs* zm+4>*xUK(F#;kW7Dr5?ob&rJnmrQJ=cDTdtjm39i%@Zr7C(xsY`0B zUG017x_fYX(hBk-|LROOc&h_HEyp<_whZ+V zja~xI`ih5zh)tiIf5(g1tu5^NCBHGLL^`=H8D2jnzBP*;gr1bsSLX5?jR2ON(DyJB z&SQVr3(5`A57eVW5OW+Yi{R9&>co>{C5CCS^0lAl3Jt9gjCVis)7E-BkFTv|tcffM zSC~#GtB|O~Lp$3mB$-qQ{vh6=>uy`|HAR$njGewI&!I8=%5+gUOd2wE+g?xQMj%q@ zt&R=PRHpsCd*CONPKbQtada+PeW@AM?@cqt}7c~2H=+30;*zD%qNW$wh(GPjXy_l64jxVqpE&VIae$h<$x;$y`MP^q}g zu|DI1e@sP^Sy-KY?&$j93;Wfsvl2%BtJ?d!?N2>?X>GXn9b+2a^#5Ln!wN1jfs+WsIR$tvzO6#a20C7imrT~IWt-hiCA=WFFBV$Eq~ zNn6g^Cj=tR6ja9tl%VVMr+n`@r^^N+xdz(TZ1dC>F_HDn{n0BS&rQ3KHKr^6?eSTW zJ?JR!?rkD{p6DWuWA9FEHZSip2UAl;h>jw~w85ZvxH$5FBA6CLut$FeU<`vGS&!&gv;G+>PGXYHsf6;OELRpu7wV|j8sBF2_O&}7D<8(Bk_4jn1eQT9}S 
zP+U-*k8dr86%{t2oBpda0io5_=BF-0cIWU3vM1;y)51+T64$J>((~SBh`gJW7Moht zki^;ZK=4L~m(km1k5yYRJ)tk}bHeTd1<9J%q53T^O%M7ZX!Yy1KS9x+ryKkzOhXno zKv#pr|Jd%3^6r%KFnibHXBc|`FRyHNNnXZ^TKoZ5Se=i0h z$cPVIKhpfM4+(V|Jr$++~XEUUFl@(v*9krP6bm)jj>OTGpe3jJXZiNAKJQcH`7QlrdR|7P* zaM|KE7~DaY)r$)-AYn!f25OsAS-a8kJsz&0I9x5MzM;$w8RcKhvjD29!t|eSei^)d z8qxCaQ>p(vzd=n3HawLx%v0QA2DBw4GcWR;>_9`SerC4a%Ro8U;L) z@(%%l!%o=;LMC08n;zOWe-JY5)%u5w%ljenys637R9XR#1Ub4dGTEJ|!?QeMJ?p~8 z8Tef_g8<5LL5QbyZgA?ZJX*vXbvbIkh=gbk>4mfd5^A2&o-v-Wo^hTG2WS5EyF)BD zf%h6rPC30=QpFp`{LYVUs`r6;{tw7g-wXR|0!3Be4-z{}lxyp`?w`q{V{Ah9j2cXT zDm@$#9Pt*JDRtf}#t#$Xuu-tev#QSG0U)T80ox||2^{n2?b4^E~QS&HwQsru@&O@i&T-}Y*NJ1dZNhHyp< zkQo4_|0!_W2*#;R=v}K7UGopMtX}2#w}+Wz?EITie9V;TMW@|Sa~pi(3(Lw~Q?#*^ zra=K@Qx&@END*;c_5+HMR_KoFd)@a&fVnBUZq~@<5J*t-gslDxQRN#FrgHC=-HCUf zp~z(tf_jxOa7uf#$=(EA{&dC5R4gzMZoX7CEjWOoa{g{s1@*w2|A~Zx_Qc!FQ4z57 zY`9;-1`v|}g>SSS6HLZexx40?t zTzm#=0}$oA6a8^euK?iGap>V}tPzlHZ$ew!`c7beilH>JAq;rne*MD^!{PCw;6&$; z)q4;pb~7^hp+C#ht|7?{{=%idQ!JMdh0K?&?3i8vp~`_>&d+>EpisEiE<5%8VX()= zPl?S!KBKFieazgb61y&%#>5p~6~dN4Chv+m^tB&)2jQ-%4$KW0eS5K&EP* zYY3uR+lh+0x;)|y0F{x=E2|sjf1mfsHutP$1PU}PTY9Y7^RGJ&C-Y((U)w0RDb(}*pyTQm7=}(b%#zv& zTp|*-jV%k3@%0}dM;tN1ZP;W$)|Vdc`*x=9AsVQh5JRl>gS9?%g5_2sh+#^yr~Yn> zT$7DU#wM_BRFhl&v$5%Gq!NYcz#|#Zh}l13jGb#Q&ho`7=;Btl#;Ezy6z*v$b%FVX zJdu^gg%iQ3E1(h~)vi%TXIp=7+UsVT4?Cd$cn4dhj^3yty6hk-7xc6y%p}J;bHx4OuF17aqkc-+SIsu)8S5D4_<^o~6ZO%NO?=tUCu9f0v7fMoozUX} z$FfDD%&W6_Yf<#&mme3F82bZe(qQ|3#a8TR@{Yw4gCb>_~DX5hC#!Ri%TGSLeypzU{-rcu72b_T6YJ4%7P_1 ze;u_lMV zIEOtqTHXb;G|UZ2bDK9SyG)g@N}0f#k`9fVpD#Ai+Bey{!LxQ|G#6C^0Kq}`&^1i& zFbSXbmC+MdmohVcIK%hW7SUZW)!8v4kBLF20A%I_}M65`xR9Ftm6#Ap$#Z^j%%fj}uf-At<9O z3I5T;(U&gwrrk7GpdJfYedzd1`b;C#ZGT5E91GKYxBWnQe_R&?Z1Vn;y*CI-^p3d0 z`T`G|%_pqT{D8H%S$<@+q%V;v#@_CpkG^jV*U5(@)}GPZa`pmhEgTwzucGru?W%m@ zYdB5fnt#hQ#uxgaDo)MuRoIr&T04Brsish^6_Tm>heZ5ov>C;mh5Aj zJiI4^WYKCH>p-P#=)uX!R*xl8{_0hba;i?~Qd--3AZe!SBav1Gf9EcIgs=%Ntxl`L zJpf}gxjqcnlzY$~T{S!BObOWdI>%$8SVnk%>f=4SVM$pz%TmQqAt261bacfV$$X_* ze=pST4qDqh-z>&Z4`fQqX!UjsNqe=(Yne!;sRV`~>AN^6??-qO)ts*;v8!);<;vmQ z$4>XtaGFH)!Y@|HcJvaN^hC^Y7)uQ%;PvO2(VOT$PX-V7uaKaUUZkKeBfi(p+{s)aOsq)sBu6S_atF-}wkxrRv1U zHwI%nV+_5dlMSkgwlIt5mn<#?p}a+;RR=u#8;qA(pv$3_$T*`#`a?+`F;X|+WqeJ| zMYk^_O)2`6nbz2L@x^(UAfWCi@9T zWk8C%#EdWgg+2E7Bp6njU)gYeWs4;IxuRx${n3-pY+RXCB4Fbbn^cv$$-TC)$B?8m z{?>2HG$ujjOrSCWdMH|9hpO15I%m{nR>f|DxY{Vt<~8<2)Q=H%gt8y_FP6(Oy2 zoOwE5YfHu`<@M^C3IDarp=%Q|OYYVnF{zxxBxt_;urEC-UU_vj17Qc}2McZc{eY~X zGW)ltru30)X%75GfHpe;5P*A@Q!_JFGcq#3U%#pV@&HBI(!S$>3Zy-tVOQs}BxP9d zKvnU+S%)~(Gzbx~lsmR);iSR5+yrq4seao$#ZVLEiNFA%B)`!}W>%BY0c!ZuD(CNO z$?l9)BSQNza@ zqj}P^8_N$yBCJnN=E#fSkWD=9aJ-xNj0Jl+c6>C5K>9Jzlhj2gR_eE?<6 zUv4WO^lQD(_YI_g3PVQO06k)t%gak}b~Yb=BN$9M-x?_H3~S47aGHA#Xq)DVI!;G- zI0O2vfJE$b!KUG_qH)FQnJ<#r)yZ-58I~tkH?KEOv4Ko5vQf!F1MWINSogk zJLS$5QGQhT-|bc5aH{bgjvm}7ZUth3n6?WGTm-ko=`%nM;4gHJ&3%qdi&hMpp62TX zg@r*Es{=tC6IdJs;!aqf_ZllrA%{kgh>n9l8NwE5YWzk(OX6zUwWy>7HSKEdw$S1) zTww~yd0S8of0DWpgV zncE_)3Bso1836K(PV+5i*P^R7Ax=}qyX(*!g_BUaDlA&`dMm=7O`#0G=QCml5KNbp zQ&oJiR_oQPT1xz|%DR>2aa(`W8_SqN4G(|+%qfA-X5wYHPxdBkkGHmgyrdb4frzmXmCl8R@{o5TwVGroBig>KQ0+ayK(^I?X zgotajmKFMjGlu`!Gk{FtWEK=wfINn})};BGIq50bvp-^gthIZm+b&E@zZ55RKL6<_?92dykMy@Q^eWuC2RQgYVj&-ylIZ9MG4gkzv;{969&H z-!)A`Kp@1|bQ`tvf9%-k`f35$hLk;Yr4}ziiUZZ%D6|zY=H-=6HvPQT-!7h4Tu`DB zIF*lk08)KWkXfpbN6;^PwHLAPy3C97FL++8#Q_Tuc`Qa=g+FM8I2QYE9;z6Lx7S!F zc551o#Gz+K`Ty^qySEb+(Lpko1V|;vbyR5P%2~9xn?)~G^mAL}>5_R=V{J4-(9TKO zWq_6d4Y*vd>~W6bTYL}^3GHL25A9~LNM7bNl2J>(%pY41CE8c?lQKFa)f)ci$Iyp< zL1oqt@1TdbhJ&9arup7eGx_~_>j}SWFd~}h6w@FtH{635&s^9ALgWUI(B!@5&umGg 
z4E;>~s{{Y`Gv|M8i5g#A?KdFn{EFCz22ZfvoP-vif|#0SUBJvN`(2#@1=dC)=o&b! zg#?RK(~3dYYs>(iC*1WY!csx(4(_ArA1l>>cy=_6+G{{dB(Ckp5ElhnnV3n%M09AEEN^HkiB_<#F2_%|nu_j#l$;}1{ zCtL#l$4=6Hx{&EEUF}icCNzsjRJaWIcdeWRNTPq-iIwU5B8sgQa^PrMIAaRR22#NC zvDjn=j3Scl!Dru1i`dn_Mt4ZMh3-v#PrIOnByI@?N3#=1wA=t{#4(UCt^uom&x~7R z2mQy`fmrnxOHqlDXup$s@7W8S@EC?=s|<1{!gd9Ml`iJERl&dJpSzw1SgAaB#t;20 z*nt(!_GsZ9|~U+VvNutNVcSe8Ic_2n)nE`b;^ zQ~GA7Spn0szyW(4#~M$_3;a8{_osu*?{|VT7PUfQ;abOd@~o(^um|Cf#i$+)eb|vC zcF>=R_;YXCe*l{lm&pPl?MI^KY)(j=!Eh#$l~kN_+zY>%0ix=Dmu1-@Yz>=de{l$! zAegf%V6H`V#U2%yKJ!bxl^Q7s#&n;aeqF;()E~?2z_=hZXfZR)Ahqv((7kH=V^*zfJisT zw0pkOy^v|T<)h#Ilgr)$ZWeSSym{TE^UT-F1gG(?L5eM@IoOpzb;7Ex8h?TLvT`zD z4!gdvVDU2FJgHkp2QV)Hp*(b*76I0|tsy_%9F+<9&~UG3Xcu5u8Hh&uwsOTJP6v#s ze18!>9oNHF6h0du*c3sxsP&%KAnX1o#9$COX?A&NKnngId=RIBcoU zHULXkTKeT(2&k^d>aeZgOAtr)kK~043kmo4o7G|!<6B~@c-!E1BJy@mo8SYes?y=4i=Nj5xs%e`e_`X8?-&J?7ZMzV^rQ3qB-@L^k`|@9$M|bV zMkHlqtX)rsD!uYegx!p-s9YDJLBY{`Iq^XBKWj z1*C5Msd-Nbikp89QG{n^<1!R4A2t5$qQWI8KDEgHux1nNd377{p^P4j0&^(;iWneM zvE|`gzYc0k4i%DB>m7QUN9^2cC#z+Hk87IrW;$*R$C~qd?>2zO{ZYu6osI*xp5Kzy zf>?9pKXSjE9qrRB{}iU$(E2>Y(NyRqG&P*IJrzQc)=4)S51% z3-t(9Y`(ANoBva&xJY1w5{^x(!OXe%X{qc2rT-~$M}Wcf$06h559j~mr4FcQv!`Dx zjDP>W#aD0giIVwq(Wk-kvCy-ELqGI%T<;IM!fF#e=fUdv0PS}zbeya`^o=GWE>kqy zYuUgLHPm(MPdEETgq!svFzxGC?kuSv49m;wVI3sW28gTKN1>3!wa?NT)j33tqkdPz z&Hu;ocx=E`UJG_e_bNKGc-@lxYp%{|{f z=Ssqt5b>s^sIZr?;u8`U?rF!hC2jIB6ZlYzzbN^L756>zaH(fGlK0;Jik2m5Ra^(f z9E|}H8Hm0wv`l>eBgrB|J8qV6CJ=cYx-P=V%K*RRzqaauXBx(Xc(_Br38ggsT%(7@ z^8-1fkgkyL;d{YX>Nzq7e)yJjBpc5n$(S_&Uy~Y_vCZOd*K!n>7#P$3mG6R#Rq?|< z5x$rjoJG~+c>Q%!MMoVyzM*8(%B+OZ>i>0y0Yv>RIX2tZx4|;AmFAxnr*WjUfc^Z- z<8-Djg@@a0pMWEXaXY8;TRvr#eIPPgjnsTiX!_R$Dab*`IGnd$G;s3*Rc38xgRm~!dZkpfQudw zhJNa5^(=}7Otb4?dZ6nL|J`+)>NMZ4qzlU4L0~9M`9%w)Yk~8sNBR#66*~om_3Wdw z)f8f=x5Yn&;ZK;+_z7T{Kk76?b`UT}ul8~Jf2cR@FU<>K{D^(uR4{tQ9r%GiCZ4~= z(+%_^ku1#*A`}v#nO*nF3@}^k7?%A%W~Snx^5Xos%}3CbQaI{yfWIkuJqI5^^1v<% z9sG%)gD#SuU=p@BD&8vbyGpk}NT{ESucCvYOmMgo^YY(G+&G49D|}^A9~!WL1WV?J zFv8ALTKuP6*Sxvmd-$u>8mgpreth0O9|^>e;{P3F{4VxrZ6@#iN5s**wS`T~WL*2l zBIWnqf>&?j@v~eD8camUt2V$C^C;=NoWS72A&iA@^;Ur27xBd)hJ~`lq{b>{mg*A;L%$W^-fe*~=SP@_bs>5b z4)4!jmtQZq{uRDCx4uF&s!UK6scM(${7{;r(3E*5kKWPT83kd4ThHem3+~GLY~f-k zsv%`7T0i25lLW>;ysq$*=XV8_ifnE;FQSY9-#0*MmMFtpWEXAxjRP8;)(A=+r-QqS z?z1H#ky8SQ7_oL)zl-SJ%2^f7Hx-ISZD&<_S1%!X<2NVb0jvCPc2xOA3iw;c=c}&{ ziM-49f8S_O?cTIcbq3l^m8bA2%vD>y1QfsUQ}|7DLIAIF?AEAUX7Y64wZqp%NM2wuhuK9SEYu0kW3w@u5>zQIW}3*noHzgDiA)YqWGZ zm*DRa$k6O24Ldh=T2Tiu&3caiCasj`rrL9$@8^xJ-#I2g*LxI`1%J}E6-soxEI*^X z?3BfSyN-Qynk^#x{wMe5dJCMIT8u&h;p#2M#eNB74^#A1z}S^rr$wh5a7=W_tREiQ zVodqnE?M%p-SRKU*!E({d%hNeNn$UM{RaX@sxz7^+E zZhZ0U-FFW6$nn9*9eD2bI!EWIu_oXS2?t(V=UK6VfduJqPb5?D#&7v=H*_@_7~dYM znO)5j&A+9SwsxHz|FlwK#feLw^;Uo}YN<0upndKRcq-?HmL%Z2NU%sX_`A0iqb+BJ z%aLu>oA0TW`hZ)3uR_h!5mvJWjySCv1?NMmAS(`U(v#J@8Zq(348eoI^t)L^>NF5i zao+`PE72_P;|#)s3Pl)cbz441 zqQVaEEe!5sBopZ0C)FP8gCx4Tf?%Qt)#p@Q_b(^w1AKR|=^D3OMUadC^Fo;oXHuJVpf-`E`>+3%X+cJjNza2RQtdb+}FekFXwPHnP#% z1WFwUqDLItz~LY9RO(hv-j0j6-8@0pE?AXkV={Q&*e)o4^}e7r^ulnxBZ7Ct(5gVe zmL?!Y)g_sXkn?ql$(0Iz?T&pNFHktj%Yp+KV!N1MLknx8+59R9<oIouWCN~~?8D~5)oJ6a%d!w8JVH|(c-+xAxoT}VmpH3Tw#Q%|o^ zFP@dZ>2I1|VAIT$aTf3uWQi91`V}E==yUl`cerR(p3++xgA`HAgZ2*EYxV)hfkiu2 z5H58^`m};GTvhGEbsaBv1FkCzBd&lZRzhba_g9!`p7)7_BCTp*x|cVn>LbA)X^n*J zT|ET){P;XhVtO1r>6bpK96r?PVQkA@NPT4)331Hm5`HGBDxta1z(yD`0y}WUn&44CwE=4n+#W9?dMKuelXrUdo51vM0-JiN_Zi1dy$~!9a=kfpI>8Js=E#X22`j=d z%hoL1i|o;m%nv31TPlhPi2X`{%NCpen~0Oh5>Mr!KgjfUNLwJ;a_vjH7*u>iu=@@6 zMy|}up~jNGKMhn%m2Z+J^Hfb8rYrdC5604do$jgzdnu@-g@o_WQzQ_s1pYp_Q#cj+ 
zY{`CV1a=SjnAk^{Vc!~ki*08mUu7B`jM+ABJLJNNRsvlH#sHw(0(WrvqO`b0c8~A& zpzV91|I$84Wj+H6dpC;$k_`Z42pnHC;x2V4&jiE6-x@{iIn<4kzWtGc zf3c~&k^FiB6kvjU^_nLDUoa&$R|I=(5mziAjhMAo17&*oyrh>JtJVmbl=fW7Dw&Fg zjN8_6Re}=xn%F|RTDMLyHV?EMGy@%5n?OPL^md^UjMhQx#((U8nT>)Br3B=ZK|x75 zexh17ALAiEVj%@Qa>lsqtUce&`6sllsZ>{|0Ob@pvw0)65`sSI;LxE8!>MVqWEHJpf^R#_V@vJ1{|3)FS4IZQskM2ujF>9(J!!FVM^akOH6R^Y?EEYOmmGV#G_< z@f+bg7$lJX0JJ*lPUOM<_`bH=bhguEwZ2* z4>gJICF}hA76Wf)Ptb%*qb~{RKhIMAhSkyWx5)skyhXiVbr;JqFE`hZvk6Ws>}^?b z*evevP5L0A-t*~KebEXrG~MihU9D{R;u|KOLs)5d(FZFWX*(*3uF7~S-H%d@EJVc5 z|JlNl90}`1qQuTdIB|cbohHd(r*OKO{)-n+canyg`ZcMzXZ&n7csl zHa0Pv)3@n@$=}%QNvmf!3ASBieVGq)9UNK~@FniXHCD6Uw>H-L{pXLxAA@&r9s6v7 zdo0wDwK|`@WJN+w%nNa?$JCHyUJ@knAtRW(qt&V>GE@mmRI)0TkrBlS)zL(lIoX;PeF8djXy?WUKT%lNqNE&1s zxHuB!nL>n!@|^X)JVKKukimzH$#aXp;}`L_3C#N$)B=HaJZ{#TdHr7^=wA~mO9EUG zusOSxx_I-Hr^eLMp*m(h%J&{r&8g%J~lG2%LRWtAt21ucTi zSWW@gI%2S^=OKLznfBe*zh*36@qSFpDw7zvD_!JkuItLBKK}arV_%Z4Zf9K!aFp6re`XcBWAc=TXNb$smpdv~t z@kAz0#BV>6-!Ouqxf*g#?0Cbqs*+NF0WF6f4kj(?UL3FuP64d~E#dODvG)(`!9Qlp zJVF%r3*(VdD)=hStti93Qga=`Dsv=CAGZE-tn1sm+qb?b z?83DG(EBrR<0&R#U?{T{qS$M+hRz1yf)NBE*#UYO_dVBFbofnP)hAgXCA=znWa!hH z7%z_|tXM+qrH7GMh0WBUemyB@f}|8M!uXWa=KDy&ek3iaOYo?T_XhXC??!DM6Cg~0 z>-+g*-Kt`s|I0-yifp8Boe6Xw%_yZ>#xDnWz1}SpEfCxi*wWSpBboP&qIJ1WE9h0Q z>#O42&Yso!{QzeA!H~>p-wXFwcdQ?Gbpwyup2lzc>!iJ1Mv=7fQKBg_nEs$X(;THB&f6BXHA7)LVSd@{~9h=7o za!jd#0GBy#eV9=OQ!7{wQgu&rJ>EZ_Tb}@Su%rpc77no)roL^i7_$Q(;NaveJf<`eoVESB4 zWbVFPjuplKlm`wQ^1FI-<-L-2%|1jk3d)JWU5yby%-F{84(`f`D3?lm(Awf15RtjG z@(X>v)d2dOUZI4o0^M+~`YYZ90cdUqt~JD;1*l@SH?rvk+rItu534tUCY)kOe>#Zl zqm7q&;c!*+mIhqQVI*dRm^;!X2kBet2hig8?kyq`;07%GfM>Q=?xk!N< zvWo8|8zPD(|E?3Hyl8FVJH|Xs0!;ml><#8#R&~&hEb@Pdb2j<}wR_O86j|hhnyD;8nqXb9WJ+#r#jW4&lueG@b=m zMZfSjB4c6s>O}|g#Yztj?H3oC`rNCGs#zWxTmgnT`M<~IoalD*>o5g-V7|W>8HKDh z4x!E(G3F>iKw==(2;g*D2x|3yb44!fa{Nx@FpSSRmzK|S8pH56FjD~fdJl)tfYu+{ z^sC;Q?PPKJoG z@?Nj&hXVc4%yYmKYOCzC)#fiG^uNShB%tNmnn9Fr<0A&Fr|h1{Nqz6Nt6*HSZ0+}` zET6jijWuxTm|N_E#=M$xyjeQ8w5Z0JY8r&22l0)!^}p`6o)Ro1#`S#p-EgMQZz7uu zECX;r`?QXoj(6&t!nZ753l)rv)>Aa^8^=rQEtU3qLg!O94W)N$9qIr)%@K!48h6((__(qY{ai;qb(*gnm){l z8QezalDo4fnx{c`BGpnoC(ox!?xLVU?c?uQ<_MFIwLg;lo>S~wj%XkDNT7Rj z&twf{(*x038k9i~*SxI82G)ja#aVJvNsBHzzoxy7%k}0gHEGFv)p1JyaiQ+)w1Mbu zjJVAdr{$zT43YuB&fgSDiD7`;$~UDugASrM8je0`PU8lxdt3ok_*LJnBz3KaWK;v8 zHwpT>fGE)t)Nc@&8Md_R;-7J~B7LTbX9d0B#uqJ-X9aEQk{E6kLd@A03UA^*j%&xqdosq{hc9;SH;ibT1Pi!{{AOUPNl0 zmjl8W?8+D5(HYixQvWl87`5y)>Qy0NyuaVUeq6ju%Fx4|I`TyaXCB~7 z{mx}B&LX>H5EC_LEMLkMjA3suv8wJYb(rFA@YP9eEnnDn#kmXhTL6(1EwuxxfvC$Q z-4HnPdT~lqx7lNQspNoCK?ja;IT{xZ8C@Xp@{v#cRB?5QThXSmF?3`jUHh?md>@e( z_9aCNpcp3|Fs4{Ty1~}8H+}o~)FIHhQGRXAy`)idJ{Os3m<9JkwvR8~p_K3Q&fUHW zq;qHCp>%3-wqW80&j06?96YkuwIbn2Cj`w({d^IUx3_gWOLfw9p6f9VAd7- zo`o|L482O3{qeB~d+qzp;<=@Yi209O@^Ysgx_qa>SFeO83E0{6t9OTeF)UcRX#Jjt zIaKZT$ZEDiht`Xi*y+1{+-WyaKkMpHvx~f+Qtn}aM~+8)0$Y}#a~g5{a;;Ij(`7b+ zw%#@5-f7}-))5Yqmq5tAl|kC*QAnzq3k6;JdVL$7>DKd;hL%#K2)v-qg*v)T9Ld2W zXG>dJSkXzGtq`Kq6p~;1_s!sT8Sk1JZHjiFFs`u_wpguNAqhD{IK2Cea1^*>dsN;O zdakjj2i%6Ee>K@Klxshn^Tj)DcAqNo%X405l3-*Sutyz|XMSz^_F2%}>5sZ8YnIF7 zxu;jXrjrFqE${MIS^fM=J~A$!0e~q0G`Ar5|U`d zDdFhQ`;)qCeLO8|%cWR~?nLFcw8VF(ISs}D6%DZ$G_4R; zm#>0{R7`QmM)xW=StyLVRPAxq5sz3v1gg*Wa($q3HxXjt9aNoj;gnMIxZCz`%E=rX zz+ShKVZ!{>xmv}QU;OcD)aOl$34EzLestyHV}kEI+Qz&ES5EV^w{LsH*;^(MWJCc6 zXb!&q5q;uvZ$j(l5PK)WAI0`Z3*rfC*4$+5se4~mHhtbl*aVz;$n<#z1+8VD`;$M- zAj3rWhf|EfT)9Besk-@q;J9psUvb?mgza&BBRqu=>~tz%|+WUN|Ds*Yw?TdgCXQ|2S%_}}(CRF2go;xs3xg zGgyXJtut7R5#=>XQZ055;5@3|`hHK_wM3J^z3E{W{m|bU@etl6nQt=!gEA4c!V)=a zqHByE@_igo%qVk8ny<>|&gln}+&V5_ZynPTKhRzb@*=!%6FqFQf?;1p; 
zadB{7`D>SxD!ZPXvsOK}k%@E4y*E_HwS^vEAR!GAmgyn$2<&+OC%G^*VUNm1H-Q4q zY}=QKyrd2pT&UyP_w$b+P$*K@g(e^g6MrVubo0Ci@-G&C5d^Ba@0uXSg$4*Z-9PMo zGYfv*WSl&_@(yE&es6Gw`KoAfe^*cL%-2p*s@!4{Fnh0K@N(oJXViIby_B3!71S!8 z3`Zr65me9b5ii$nXEjV*yCxuFFCJ;+YM;^Q z=Vh`I@c*>-o#AkGZQDwaLPQXKB#|&8`sfCUAR$^Lh@PlJ^fo$a!YC0n+6;o|z0V+e zh+ZbzD5H-ydiQSk{XEBeyg$Bw->*6L?7i1sYwvYk>snVi&)KNV6J$;5!+BckiwP63Q^@dAT3l&-BWP9`~;HsdMIz33~p9=Ro zTRN1bo|(t=2BwF2R_#mpd{PPZ{1y-V1|z0?B-MgC@euj0^M?7HS&(<@Cq3FbT1+lcIz~hK>!Cw6qO93^{tfl z#lDj7X+u)LpMHgfy+uBOZPSLMrY){YQ&#}oJ5=f4^L`!++L`mgpVTwxgZ^~)nyasP zViqebPIK!muU??3{j+jC)J?9~*OI20N>X#=o0j6^sy=BOA!l)0fcWGCN^>7zUIaj^ z*weSI_qF#q4tM6CW#{x9eW(OBbQw0{^Tqkmsp?Ys2Z$gMfx0m%Tt3_!pf$(&5p$lg zMZz`$1mspEUdc&_c8RqkJBa;cnK}{j{j0#?-g&Hw>RjY;L!l z^yYOSgZ)2s`w8VxJrJ{JZ3ZY=&2_lG6=tbLz z-#1W3wHi*7eM1yqk?hhWB?W8nFKspS#5Bpww-C;DJR?fVYKZYZ!m>wU0fwrha{qO33T8CX>-Js_+6>ykxI+vwI0Kq3!r z_A{nqZCgO19Wl`YS9f&QJlWZKXYegrDI1;SGP`CBQb!+}EcUk@`!EDYV7KHJ;EQeT zgtLC8jh|=CTibWekdD8A{M9bks34IW4;5SM0ObRki6HBQ$X8P{a_XVNn2RdiUyjRI zeGKtr)yl;;k-vo~4otwqc^5uLU=wa$6;-m4G%;@!Vhy-&;hTt` ztq8pR^0SN-{==JI<90`T^x(R+IZ=DZGfzIi%^yVR*x64WEuxFvsb=lmc^(=3Ud~lw z)F_nKq&MsP%M>A&2Xb3k^w81g@=rsoqSXNw2MMoB7QF7QY=0p-KN*p4jnPX+QM2UP z?TYQD5qC{eeuuGdAT!IkO-^T`Vc@gS@r!iMJo&!xHfyCXv>gUdLIqs{(DLtq^rTjq zHUBaQ&gMM)aJwbBSCOrgQb|X*tz$!@#$$uE?!=IU|ZyxLS%|1h?%cA)r05?se1Y?wcA<~ zJki>Y-eIH@@;m#Heft3iiIvU>!D4PXN)$xvl5tzR!DVAT-yPi4DkDe$M`|*&$`9hk z88{LZ+Ee+IJbVyI1f__e#&y!){E2d2Up(Ge&|`W?RXrLst1d5g7JHWcg4ctNKJ|W^ z-p<)QH1E^@<>Z^ttB9CYr&RsWBW?9s49FEHmCH7eksjbPGDU9}Z~!@1PW2&m6&X6Dn^C*A(QBWpm;KoV+BA9I?c$#Zchy_wdL0;EF`H>n1*opr=rTBy8`b;RX@<`a` zOu>p4mP~GR6*OVq?VNe~ps&=S!(UUB>Q%oc_H19>A8%>K&7Khj_HSvv6m*F>E%upq z&TETgxl!zU+e6T-9@K9)@w1+;r@^_%+Q__aD&Fvu&*;*rquz;S^elbxyBmNHlcU#U zW}!u9@OT_XB<^l0^GnrHfvR%V^V+g}NVj*t%zHyZ4^ODt@T}vf^%J>`gg5PrhG;zf zjSDGKLw+j(m%VPP!4qcKiMvj5`PCo|Cctsrs%kJQ_P2a(AZ5&bLs8wJGVLOBxHFF5 z?wEu*{N}v(Q0}Stu;T&fRo)plX0z#9B@%1V0NS<1sH+ni)GBf@fJ7->>$ymp~V-M(EPQ}#lCP0W>Yi^o(eW3|^YQ`5$|hWPFu z_5a&_s+bQp^S_fG86^09{>*QrW*i)mwg)}t3mi?w`EK(Ab)4VbN$BUjNx+(n0+(MH z^e`lfWQbM}^dnit3m4#x&tnLrD72@G#@EmR?t!h}8~`5PNjKv(m05kl6KwM`pk*-0 z^Pjqjx-E6>*T(IM9yE|OAKG+A9*2P~d>@s+fd_T%o$}kq55x7*QMCXK)tb+4_Wrn= z)62bqRe~k<*k%mPo$^qBde~P!MN%(444nh<(~wx{%aIeaO((G`sA8uOjp}U>-mt2} zsf=fdEv<)0nRM?BF1D+=$@$$aNFDj^?fFxUpOsW7#SfZ%=EZD>R#$*20UGGa0;5Az zRW)6aQ?M5T@d83;&oN9Ch9eALwSiR~osEgYUWmGWgsk+2ONIRpV{-L69Jy~Fz||kIEpB!E6~7sDX42{qFh_ns8_R{O<5+5q?{BO=KYs zPGt>FSz#;`_%th0w0WBcB;kd~ref5zc64T>mgzB^@G_Y*AXbuE7Lm&7vp0Clgd0~z zS*=6!74VEo*q5~{gR{+gu*NYr)>2`FNx(M8IF*{1R^mLxJ02}n{p5yf&hF&FB#(8r zYy~_!r>r-mJv$?Nu`jgZgUw{zot>?&p1(94ghSe}Dthq!hYqROVBOxOXYHP0x+v$* zv><5W%-|ZAsLfO;X-u8}?N@#^rEA#7CbZ6Sh!g&6&5yqGm^6R+b%nTM@N2=_YaA%} z@cLvoD$QM4k>Ssi`gH^mQSzSJRCdx!_v|>cm|eQ39zF=^75tW`vz`Hx#@h56(ttHrh+=bztuQ|cMImjLB40Z zKBVJUVs0{9_be;%8xChkO!-FJj`&8_?!u8--K)VE2~Z zsDd(Orviz(o1-%U-61H&0EVLAu~yj}*@rcsAo4|DxE(uDZ_47edhayn7zcX{rKwr3 zh`O+c8x?=fTKkh3WwaKCDOjKM^PsN3cZ!kY&s|rVPWn1fbuV1nqt*uX__*F&+545uKAez^T#~YzS&soP*15ATR|MJag%d|6d9-X?qwV&;c zzaa1Etx<4GTJ`qAGD{44#N+H0`Um0vNgP7mfeh$IUMS5ki3AAjDdBulU# z7yFT6z0h}NK4?SCm3vvUg{UMC+dGWx`A2q@kzg%an$@%VYLI=>aAr>P*g&c!`I73$ zz1lPqtG1O#5bNiF%B>{DWEe#X*s~6-Wr!|~oINY|c}AN%8%WT0Ik#qA6};7E*Lbgu zIRl9lAnR1n3Zs0?)zUW2SSW}C_S9|S^OA#xMws<)m{zWK!zF%)zX(}5+`I?b{K;v& zZ({x8jc}9NkfX>X)s$LKJ(4nhcY^#EO7#>?Ja#?nHH)>4g9aUqagEyZ=-zG_&B2)V zv9J_7Lg9!$W+>%SOt8kE9oS;MFvQhCb7eSNYB={al_YVW5yc{c=HNG$3KZGm4xwIX z$~~e(~G?)f+ z)4(zU%}r|)Rg7pc`vE(|px00Qh(XBUnOQ-?7f(_x zY4x55^uSVY)h^S1u}=2@;ctFiVNjO)emz9+JU#sy1m5;|MZzHzh`YkZ))lWlG(o{g 
z{#af$ivWhNLQ8A5$EHA@)swk?tx~Ju73}e3{AxNkhtJx|2CktcYoMW&HihNZvZdA880&-!QQZMO82+q+mk~#SP_qsgIL# zk8N`@I|8G2Gs{iqg{TJ|q(PJV4Jg2oj-0$^#2;h{cGi2SH<(17=Gs(3e=f{EQK(wX zK5;TuM0YRq&nrLJ{@u(CNZ!mB!ptS0d{o{MU|0jZgq^?$EvxFu*waFi1}I*7a7z9y z6V5i2Sdp<32{$T_$Xe~l9+hf^xbMGVt#~K8%O)RgK+F6roKiC#q~@VB^@?}Mtx;cv zI>DhHM4}qVRqhSQdzykJBF-Jk=4y*kfqIK&z{+X|<5IYI&pRMvvC_Jve{e(Drq@>f zW(v?S>qf>EEh`Z!_cGU^1mdm`DVQ|kMpgxB?3%$4Bb)2WkZ&ZVdjL}R`I>651Z{Rw zP4v=Gk9N6(t|n`LHp?zSD=F*xPsQb~+WsJSD9i>ceZ>`;ECA$0B4W!+X~1=RSyDBv zkMY>dK1n6f!CtVmx;b_=?a?4ok1RrKmpV^Xvab~Ai#`o%eiZXvUfX7Lh4iXfCGg=vnB5V>w~L;76gJ!d zxd~e(O*Xn&$HuzTrc+Mx``(YpOe_B6C_2~eT@o!^C-Ppo<0wg57YlyrTRKMV_)6ou zsU}ybE!J0W^~6M^a{{a z?XG$P>7WMckG#k$o4*|%ZzUwmC|;|cpxuT^RC`mNvUt(kNRha@$=LQ^RoLbRY{%>F zD+39Ye4F1g6fE(V$e3k{bpP!5mLbCzC7|WsLk^go9oy>`ur*Qz8!n!d{~BM-q9eSa$9;wsknIo zR*yhoZ_Mf?RSVWTfn1G30nKIxA|;}`gn^H?5Bk0k`YC~jS7mV^yme-9{lL0XPL|*Y zOC(D;I81a=H<0k}6$Z>pWN=9v4_K>qbgaK@b9l#ij#%ok!>Cz>9TT3(zwN&I-ZPo; z#{--EAC~&sTUJN0er1ECvukY?ZL}NuzGLr3tMx zQY5j^D#HDe7G*5X`6Ht}ggBG;zTvrY2St~NUi?Y+W&9GDjF~h-Ut-j5?h>nrCu8_z!ZV#0nuFEd zQvYm104tjeH5r2}YsGhXjP1(p7y!wi7}u}6cW>Y5yS&!TtavSMM_ZvJmMp9S!{&-( z{NDLYM`rMdUlA-CKxRKC7naqtlpAQRLyAhOq-Bhhp#hthjFr)Xr|3tOmL^i8%I*C% zDHsgFjrwXvv?Lbi0_rxuTOwkr9Bvpt<5Z-QxSqwRBRzO~o|BAZ=~s9szO$m74cuKC zDrT!>7aS`8`0_ODXom*si3eRGq~cmQ>$u6gwqG%OsXP7UW-v!q*?ciEI};TW znh|x6^|6FD&6Ov15;R$1Xd?>xzzKkt<}R)9y8QWeNeSlqDy zSIk5QiI9MYLn*<=mry-qEpU>|PLTns7|H^!YxGRuqz4Kit9K!8Gu{-p+Tr30jWpJM zLaajWa^Xfb2<~+`RlFyA)=Za&);X`MpG?CzU!ar$X;+Ak=8G~7C<|DfMAd>Rjm5QA zhl)a@SeVK|ve=kiJePz2b$|SxWw|<@Z8n}#2n7#DP+)AfzUmGY z)+~Cz)GJgeB#Vn5P#(P=V`b3V#&&}RY)+<1Pg>sik=Mmo_p3mK2lXZcZNs{VaQgz5yS(SEbSbTm4hFc}2Y&Kr>D@{JJd+?M!wZ`;Pqyp4*+F zt~yUIAQ<=3EYnNv-5;1bg5l>x0m(HI`l*d>W_6g9c`UXxpxb2nP{OvtTH@aZx;mvO z*{urJHLtZ*GSyX=2vd#z-s>g|%>Cl0c$qpWmVA|=aiXS}iE*9f5tEL}gQDF+i~?-> zEON(de&^sGuvO6i$24ReYoQOX$Vh!>b!A$*cdY-ZZ~jQcI|>#76Kq!8rSgb!I}|LR z1ki84r0?9OdmR2d%1#G1ou3KVN4q3e#~wqOQwkJ9)`LsxHFqfo7x&05-A3YroIS|y zlK_mAXRHJr+dX{)%o`M~knmdg@`hEn-QaXu0j#SqP4y3RJL&x&SZP5aoCfNWg2{85 zp{GcJA7MZ8U!A@TJgg-7Bi?O0*z$;fd1>L1>YvNwK_UldSUX0$Ap>zrfG-k4eyb9TW3@ER%bZ2p_l6L~D`PEC5i%)&iy*97I*Z83l*@>Oi*Feh_S)hGuweQ!zSv4Ju*uRed^Sk`k7%Yc3+jSZJ}OldJGW;L zBLvvy+6YAdQ_rQPxLWXDBGuRx9hy{Y|td3fzlp9)k;uSU%EtTb#*y7vzJJ^r6rAmrx z*oxZK1a;G;ho?|kySOxg0^7dyl7g1R4h7(ngxgYS#a<#n>^gSZB@S@!%J2a;xN-qd z<=)>_gjN+>k;ppYAA2#_gURXo=mi-ee58M-KIU1m_Lsi3%dkMiva=5%SPv#CGx4J} zi}>F6xpI|#A@3E7h6bf-72J!;B(k$}_owlxJFQt|{ll+xCdCCpa#u`D8{+@U4Hxh~>Wz;_-$lSU8ZabMu_ zZ?lnCfmxP>M2O2fzOi!igkEE+ZzJIvmsBx}@+TNP9~Q!F z?&LFN)?L3>z?MQ1^J~$xeKB^J`00Kd{gJ$UbGxXP0XGwa29`rG=np?`Hf@%E)KK;c zrbB&GpvuQ-=z{j_MJzy^z`9GCV9(IF`YfgG&&@z7C_b4iX3rAb4Qx@4jR+VJz!aBm zJ4~A3sIwg>25U)(YZ{kXoVK6<=Z#+%>Q6mygxB>}L=m*}#t1_{d&Y-Kyl4`|&{7;) zB-`G5a0>7mg)bkbthp`tR2ITobb9z(VOjqtge>NcZ>xHR$Hg|>yPr|_E5%H*%Y-8Z zEP|=#xPW^NM4iE*V4C~lQ!dv2$!=0rX03Xn3>mn)*rDurLfHpu0-vGqE*eXES#>7! zvTVe#hpdx1e{WT;s>CQVaRc)xoKUamZa3xweO@L_ZRc+1j+P@BQ+8H2XF?Tn_UI)z z!1PUqvO?k|nAYengltLR01!v`CpMA!vnSx`w5X^F=WP+KT3zkuQEUKD5GAS~j!2qe zTxW1}<{(BSHHkMPWQ}}lKeJEsgUr#B{0$)_a?`6zVHwOqs+vJOdQ*C-^sk|;ukYNl^KP)45vG-&cvs>Ur>5YR4So&e zG$Bxi30(7+chJlKX9DcwJz{;&ek{K~{5#8K&6W|VIV{Eonebhh@2 zk{Pd?SSJNKF{}pF@@U|!;v2cxC+;6R*R7uUB0OdPqYj|c)yBTi{)a1IvMlgy-@(dri0$xw&S0=(RJVjP^1J*p0+l4ILwJS^?U)~a~~*? 
z#wh#qI4I;)32>8J9vlG0Fs9lbj{iV!f*xLE3J(Vy!B+MFY>MiQ0Aw71ol&0il2_$g z`xo<8VfJvrFKa@!4W~-|NWxyhY2kwX$sfcE5|`Lx9!oO9TK;wVUk^AVPtm?>EOL~MVC(3t_yBC5xKph zcmh2S|7!bD)7-RVs5qY&giX?G{+0Xt{XUuroh?F1^@o^o{CyqMgXO9ur1_nNdrQO{ z38J}8BbN%P)-5yk8!*HfIr-h!8$j~jV?|_Ie%8L1F^ejmgcYi_@oe_=|R$BkYyZUwi@s|T2_V?2v#kMw^c$0TKJvO9Y z2BTM{;a=;cJ5}4!xN9&o3$z>n?Ln%C{2L&_R~&jXV?e_#7ITKf0Sb3|sS?drVza8ZRg%g6rT;c$wm4qw71TPMJ1a zrlU8o-5XCUqhRLj|In_f((ib@{M2V^doXJt6E@}OuLy9P06Oo?k0g_}ckVk?^B193d*Heys1X;oY?Nd{&Y3>F!?uU9fzksv*;U z>QKW7_)If48|OB7E_Ia)jmn#Pu`-6L7z=PU;Pc^rGOKDPGTQiTbyjwduZd*>S`R32VPy+daDCR_FThwkpG#~q?Dg@mU*iWji-Q9p=wWd z>Q$;^>UKjfXlS+uP3`*}c555ZU8{C`KaNiI}y?{xaEW#Xy@GPx11_7M4 z9K^avpokFQ6?upO4f46{B~oUk4GmWcub z064iY0?Ctnl<&2^Gsn<{FoHQJF>3AO728+(s^{}e8NWyKrW-*^T(bMcRMm2}tPXl?`Wkx*t_s)*6WR%XSN_&zFjWGi1b?4jie!@Xb z6}7Q($`Y0D(f|^h!EBYtQ0!BHmgdm=uM@O|pqcoj-Jk;Cg0SrekWgpEzCt~=cb#(o z4P5g*e+4U;QM9ks58C?0gS-UwFMlm6+{lrurwRD+Mhe%uJnkIBf4~M&*hbWGD-~zo z1^(b!FlQTe47|v|u8Az0IN=Sw*ZAWc&*>kzaX2s7ijR+f*ncVEUr16Yq07VApjbgc zbDCg28gz2D`t{CT@x{#e1Mk0X(4kr(>xd5T^`pr1r+~i>a*<#9FiutfIS|MMKu75+ z=Cn|fBk6xgR7qw4y!lUCQ@)~iwblXZB>LCt&qv>EqHDdyE-+SIg(en^8?4%5ByfB{ zG+jm&Ic?NILmA=(bj{4xz=Srr;VH=nLg04NtHR`t^qlwyHdcH97<{ek0`=tiQbxka zI&rtp89G))>jUkf;nV;~JWzs#rr4A0Ki&Vt;sG-^|MJ-09PL|5RrlyVrt9GShV6)h zGhNlIDQx0lh4z1M`}YLk!@mZ!Q=?jZDvo`DNmf3x`eqEm-e{IVKQ@VAYOMgX%A1_{ z`j0`@8gyzQ#s<}W@Ge}|b`3u?}g)3_SwqxYw82t~yd(I{xW0T51*#LD5KZPn)Ca#g&&uCZ8qtJ~n`Q zB+oTM0K0zl;WbQaFAJ|)c_gNTKfJ1=PWCNLci;gS)3ZGe`N9FFDNJig6e$M~XX7kX z?mkaRB#k+=XItUpvdqx)F-bZSCJ##!^T||0{MDy(! zTWBS9Of{&}XINU^8y$;hL{5nxoxPdgZ4_{HU*@*p>3{ouuMK0pdm7LmLX_@o1EvRi zy3N&WLKt?LALN?mzIRtb3is#ndWly&GC@psT_FTn`^CcIVDR$t&SqKfL#m56 z!c-_PXKv$5s(i>YDhOF+sIRe()Q^nco<;Z<1x+>^6gW*(|BPhX5IZ=rVa&SjDOV64 znHiw(1>G_UnAAI*-oD24ME16u4|UQPC+KSZIUZEBy&WC&ZT_Ybi~?4|RC8bVYa($~ z2?X$+Oj-o_7i}Y~_xsnUluT{s;Nz9)HeS3#6CwNf^zQgV%+`CcXS$ z7wGli0dKSp08ez2w;#lV&U zidrkj$A35Z$W)m}xl(lm9=MJ#@|^8z;WBi31I%RCQ!IcuJ@ot{7x!gpqv1pZuZs?* zL+5?ta{3z*cLVe*+RR`lU_5$K4AKGeWTLXq<~CIH&k-7SU;bc@dHy0u4$F>YJp5|-2{-ew!W_{ z!G{a^w6x1;62XhJxwB*uUbS*U)gs+~(71kPqls=?eDmqWlb5PqcW?>KZ(l#)E8xNq zYe|Kr^PbriaCH|ambVjRr3!eX`Ay_WoYC}!dcL#QG}p7DgR4R}G(lszC+I=J%B3Lz zX@4{N6lkU5_AZhA9$piKoArBFcEd!c%|x;;QiFZsxyqJA-*U$IiT`5DFS9$KUZnQh z$OjGNDKhoVo5#b4hkGpgF90g+rgGn7Dx+I`EEadPxMMwQd@wyM3R0i7mg0y0LCJU=bZ!f3&m*PZWd%Mbu@CeU0 zC;g{vAIjce&3^>+flcH~$BV09#y4o4sY*w#{#6{&qKOH7V!=!Fq2Td2oE84WUy8ux zI0WAJS57|Keq;fSHJ9=m+Lp}fpbLh%9m>p#y*)#3A*Q&b0Doua2l-b!VE+$5V$NHN zPMD=kdWgi93@~K&jY_x$GqkwcsF7{@2HDeDDVAU{M4)72vhQhhaOGvEmLpX$t-EEs zX)mu%hy2O-_y`Spdmo%CT{^s9wRv2#ocHAO&8e<-%eb3O8?FBSCm^4nOFWMhmOph0 zx5>!@DMA<0@=c}VF=+QO?zdtR1vA;iX0^1y4KPAupvYzoJP`AxVDu z@y(M>#>hN8S<*W$Yx}v1FR&Vwn$=6^I`KE%-gwM6bHzp{co>0CZyL-BgB_oNvsv2&pA5=e8G=|X#~aReJfx1MCugVeV#Bj`zVD+kO;cCHgbQQT zILGP~#~jmX4QOI}(W27r%MydH`|uOxbWv5VJ(*KO8;!22?qO!dIK(GQGb( z8VXe$8Hj!}Q83y-616fG3+D)&{hl!%i=Ct&3LwgkXy;~EK#R)M&=DhxR{)Gcl4tI> z#1eb+_>^8t>4ladFSmx@2@)~OxV~`)u&Of{8b6Q5^A~0;JJ_kR zB}(=4ml%S1U4mchD6;NJvQTyD90rS-f$xls0#<1b#?FgoH)-g+%#)MDBayHAuZC~-Q6uE-Q6G@x;yVizrW|+_s@4f zsCdraduBd0Ypt0HmXi@hM#Mz~fk4ROVqgUj2*v{hdaVcl3iw2uC~g_}2f;>6-5vx& z!g%=u1xiYJ2Yd+UATBKow*rHMjP`M7Oz!~rh`>}y%|Xc8($dh%0r)QnBxGm!)xpq! 
z*xA&`15u9Vgdp=nU zvYE)jyC_Gw7;fw&eqRHt!zgApNiVihHSDzeY6|Xuof^X?a60mcXW2nRrxGVqw)!=F zr77Q#MS158pF)EY&I!CoPC1(%Ju=5Vm-s!IU#HwtYjs{K)l!z4?1QcATE{oa4mbD~ zOyIr~1wl23xhliZ#!9k8{{7W^s zW$i$0^2r%&8d8(p5ZO!rcn12!C8Sn6B&8S=rS{6dekJ#8E@Rybsfg8TnkyFa#{5D6 ztMif%E>tA6oED39nO{9<-zO~mT*RawR4K)BHu;rWx0;g80YtLg=C)Q8HQNbnaG`Yb zm6d?`taRR|%UEBkrS9PLb~VJ^Oql`X|NMZUJpOgQ%8;kbn&v%nl`#3eTk1kmxoRLCQ~|8CgDw=hkje?9W!|UAVl6J z@&6Y8lHNKJ@S!NH$4@P$QW(i-TI*&)Wj&Mwta#}CRmi36AdnBS^{g~`_N0h5qZn+o z0f%Zx4~r1G3l;F_TCRV^PKDjp0Lm;Y1$m8p0>>V*=SP!QK<_#sczT^LWe#~q(m31!? z9UW}&2K9M=DHyNmjZh{G$NzmYGviA%6{7}v$lV@)xRMgZ*NI=4<3C@6K=8XF+DL}X zNm3g4AEFa#r`(nuJ>~TTkp6Gbxg0e@M8f(%CH$IJg{xAY5v3le^8Jnv0`U`=Pfa&w zUov1wiG2930ZsB>bEkgs3ZYqD1F@spliBThjT_V7ddv#(3xobXv}`hUX&bgT2<`Zg z^O^f$tUTYkaSIz$_2++X!hCk2s{P1Gt65j#Uapdo&^0F(C??GO2e9IvLiW*xp)b`C zrWK>Oh*M?*Go2;z0QK5`@!VsrBT*WJqU(97JXnl}Bs}hmMH3MY7D54mGEms|fF`rY zb5Qk|KXvun=UeZ6#)rY;U&{sF>l42r%g}S3)y1}2Vi7l?g+8!V#pqd)dAd*_Bg(7Q>j@dKY@}T@P+xbF>wFpDYmKfxEc!v#d86tW z^#OWKgLyMWm#&@P4-d$cAH84wt#$B0@f%Fz>(i>bt#zvArF8W%>NN<7XeIE#Fum$_ zwho(q(HW{FOUD_iTaI&3*bDkux2Xp!oT2`8j}N~qRU0}1HiMT($IqiG7K_6Q#OC>2xEq2+BPUUd{5KMaeRM)Hx5c@ym+bvSmr0?k)uw;GQC~;= z`;bqNKU&!5OWhF|ecvExoTzTGs3?Wa#roYUr$Y@)B|4~o{Vs{$@CzpMUVbKsmWm%7 zn5x6KMt!mVfJroKq7n`o+`cjS&1o;(BjaXx?34thqiaQ7tSxLvofZfk}n zzEMOI@F-|!JdZlYFaW7c_*0Z1-}4p0KtAh#4=hO*M($Jq*6o!}DzfAd5qrsA!ulQ7 zTH$()twBS>b@EBJ#n}uGkAGoK{H`TtFBAop_vTCAzQO>ex_m?35$4~aWei|zYiEWa zSw$NbE7$$D6YOeG)%C>M+NAj#IUs?JTqSRrK;3Kae0V=j^qL;C^^mMGhwwTeKR`AS zoOTL~V&u`ExoMX$jTi$G$^0C&@A2f=dH+QM8kvp+%Lf9xwkOnRDKky_LB_w0cF@$Y zAz&8^*9~aNCw$SYw*vQ1;s328{3k9{)9Q@pop3}4-ik!U7S$St#nWch{Z%Dsk!Y_h zIe=E>J}w|CgULRV-k&M0qoenWTe4cOgpn4p{_}svnL-g$i{AcBc<;`qd1}ppfeSyf z@i?FqfYP$*WU&a*jTmvm$rZr{{$$;b1lHhnCA@!a%ehkNpq9UsgI{{T8nb?@y#Lpp z<;5g;kQGn0!6G)X^Je;*DBGq8?qAf*wP2P9s0ji?R_zmHlu0Epn<+@2_!}vjqDvL_Bsm2V?l@#;B{;C$2 zarfY0gv!d#+a~A1oB74HQ?MO=zeeMAOfY9F49z4=cLVmB+>)fo4@J9q{cB+SOF?a5 z`H@(iAai^?O3lnR@lT0wM6#*4Nx5l<9F87(p5AxKF$rw_>%dQ3H!6|Q(WyKk%`!@@ z^{U}KoAs_bt@I$;!NGVhH|>ZK<7ZATuZ@K5;UwWcuy7AJATUv_c4h%|i7&7*kz0k7 zn+q9@P2sqA)>(Q@5IPh_dp-q2kN3r14 z9 zoffUD2zQEltK_1&hPHEJix4y$+!p&pd02Wx5j|LO3}P}r14WDbYl1tM+kx+uMwSn( z4uKiU_+Myzo-G2#GZ4^VpBF*~%F&T9pU~0sl81Z6lbIdMY9=go^ZP;g0#!q>OXt3ns zu!RLTLle$?H5vm}ExWM(+|Z>PAWf(TQ<3Q}aM@-wVX(pCu-@}8&0E;1yDgL!VDtUg zdMwa7j^pCUFk`&u%`1U@;Xe?o&k1ifk6P$RGAl_NDyUdH#o+}6Z3MvZaj!ZmJyls)B4YY$=F%(urnRGR%dstKWnedm{S0C9STJo49maNI7E(+&)*HX>) zeKWeWD#_D&!nQDf`|#=ujYGX+W9iybdYz!TF;F#=&97go@IN)i2+wfb`a~DYNnGXZN_VnS%z<)k|=n zl*TIr?3|=Ox0wA53Yx+cueZ^@bU-J4tEE;F7iFL8d4Gk1#I|5CGU$%J#Xu(?(1EIj ze%8^br-*6w{?ng4zd)4YZ$2Pi*~b|MG-NQgQSTvyn>cL?0l(zR=+7;UJSw@?5_WzMU=l8&^IHCkk(@yfj-bdUA}0X$`T3 zD{bj(8aImL=|~kULTEMVwX1X}c~&X1;UeYya>qkYKB_TRg0k$yLcr>SZryuq?7+wFZeu=4FafE7#pz_rB|C&8lSQ6%} zk+eMQDiFRDMvL!3b>$Q3OGxy@a!`5)kGm!CbIo`pZEvY@GGf)bvOmTC#}~Ng*Dr&( zFsTd;*VC83sfGLv&WyMgOy?{iB|pm5?O=N=R}s1 zmY&*%|N0QFMOhV&bW`;WRQJk1s5GcpS@WwdT(ie-euD<6E#2b(>U)>u=O)P@`CErE ziQ9tkBm*%Ul0}1JWm7ct@lR!$Xue2!Xga!g@7}yFkRM0rWT!xUKEE-QOkB$Nmc{!a z#a=v04*c|4rvi)kw{-D{ivM(7sT{_ikY9~@@JK?!|FeO{rNUA$3VHTvR8LXePTN4- zT3wgukG$*s>=BqBMXx%8`wf(g#RTM`2mE1@ODSo&1;CN$>!%3e@&X;tb%OE@@H>4r0fDz=}$q530X#v&+fZL@jj8TTMFwO#qBB*sn)Y?m4811=4E5eOdAq}5v-L$MZ6eq zOBbq)x?0%jy62zum)-r7r>Ehv>34&_!zfAl1ukz$HP|zHa&11cj-*5_Jv`h-t;}A_ zEh-L&@q5CX^>n^w=*jJ|o+OW@NkP&Lon)0y%c(#QPQt@$@$&C4i4~DUn}=S1R0o~b zmdXtW^yqOoZq(cvPv&%=oSekOkjV}e);TsFFru$4zZ`$gFByB`e3Bw9tQ7dUjI>X;4jqa6&@@r2y9wOSh1jm+vyUgw30gzIHz zeR$u6{}BZ8bRUqEWJuvtb-Nl`U95jPyMOYdQ&41NbT`|3?GU)ed`aj=MK2r(8$JL@ zzu1x!Pj52p5jMWdjhJ{!;okbuXTAA}IX-QS<@RRw~2ko-M 
zxytit${UT1T$s>!1Mi*wwB1A=oc8nbfmO)X(?#cm?74kCXK7nlo99CT@6Dph4i6m_ z{v_vAUi>SMQg$`Enbm8;0bH&s^J|&m=%nuff#%CdF>gU6KaO?n$v?6vv)v&dxL-tM zFV-DB?1hF4j{P35=XZv;Th5dKOumCQP5j+{wOPR|gjcIG)d)>xbXP0C(pGN9=?ESJ54=GPIgy&l3Akv8dakU;3C-qd$mN@HSsT3IXMO;h6 z&d>oxx-&J~PSJ7f!B+XqVUwb5zS)?7HiJ06LUYb%Tx>k zf(+D487Waf{B}06^dCsY^-34=>P%vnS|PQ^?f3FW^?Vn=Bu2~qrb%eWjj9Z83L%k_ zHUVO+@w`PIXzgUR%Jb#7bb%uXgkGvs>vS~)q4DN(9U%C;oiGul5SV7;buy)&+L&=9-)%DJ39?03`0Tw!}syMoTtMWnr!FFBM;{6S)< z?-_De29S$3PId!}tqF?!^$ z;Lqfr3#AuTCW(^k$c-&WD0B^$or;UN(DGd11zyZfoER4Jwxu%!4wZ0bY3yPsZSU&3 zQIi-D)Cv^1z1@sP<<*6nuXeJI$9P%KzePnwtzJJzHs8WoLgWsbkCyHE!?%BrF5~)V z`K9$M9yM4#h_P{jMf~@lweF5YlL}QMrctvK+@8Cao4xE=4zkK>x3t~`=X?r6$z0E! zvhP{HxAwNMcT8UYU>*G9N7aJGuRKa$B+|En#6wWf<*$56Ph^M+QfGc7AVzkYkjdDF z;~Fl1e#l1UU94HZ_j+v&kA(W+P3_B2b~K9R-qf=9MHH-`w;tvb?DYnfkSx3F6uYGq z+qWe#8v=q_soW`OiUX$uNwd;zTBCL2lXGVYdiHmA={}1eyOB5C*~WwV;4KO6HeBglBlaN^?yi+n{KZ^y%=lH7CiH_&uHcNXVN@=bmTnEhiX-w zBiYN&uiZJpUKa}djz106ZN64&WDUu8i6V#m_@Mw@-(MwH3oBq&;n(30FmF`}SbaeP z?ZJ2PhQXg1arLvZMVDC*ZW|fYORz~1yDmgXc3x_uRJRLSIhe)!sCcgzIk8oow>y@o>>;B#lG&{@gExjVFxh-~B_Wn+q(S_xo6BtR=8Mm6Tk7RAIUTRC1$tI^+RYb(r7`y1^JTOn27E~RNY-+v zWDV4}Qx+{|>MS*C#iUgLW2GR`H+Zqa?VxmIhQ60d{N{ zIaR~a%$yv9>$26LojnxGO^l68yWS(+D=o@aG4S|jdF8S!NU;MbiEj-Il2op>=Zcmf<94Y~~$65F8fJpY4l{Y@S+n?W`-Q&43fpkNm!og0H zW0R1iH_;`m_AY$;9)0_XmO?gV=I#n;fr>84x$y|Mdj24IS<-S^F!}4(zPeOCYX%#$ z(;^;b;r9FxEMAw&$J)cJcT@L!Y-%gvXR;&sR}76eNc2WT4)g0pUQ;p z5$@@_3omYo=!)M>G!0$3jaO{^YI-FqGb#vXGUnrUxB;n72pKll+jMSxoG047zV(4Q}dPORQQem z4Z8YPHZ}H;XKhYScQLan?O-C~`uKnZ6#5p#D*2~lxfg#R(F7DosY?YDJ8jHu92i7q z!6^Z!i;G&jZCQ=a!;+<$b$SqeygHBgSRsc-wVCA%opU2+sF`rOhlQ1`d?iJGK?(a! zNY08B1 zp^w1u8Pgo3EQ~0S+tU|IZe$?9V}(I+mN$Xz9lMWNXl9G?qVt!^ywT$$=QYnWQ4jd0 zcuI>Y3biR4Gp-YiN4(Uup`Olqsz&YF#vU#a)UTVoZGqVK$U@ktD|=tEJ@F1csFdC9 zCZUeU#H3xWWg&i>VFDa&8{J#5i4zhUGMdh?tXLt-CtxHYF)=icTCCsTbU0V~K5>^R zZwKRG?36lC@g*$cesrnJq*FhUSQq0~MW#0&*2b^6Ufrr6F+E)2NwPGvxegqTZ<}}; znThp{sG6T&7F;p8-wqNkIJGCJ%prn;fO(gkls>dn)5+m@CQ)1v4WV=GC$J)sZ;con zd~Q6#Y#v!-aT^6CbeAQym$8G!jjI4yHjmaspGN881Fap~^G>FIk#qcUi*bq9iGBHn z)x0(Bq?6ip&#a7ore`t=C}Uv4Ezq=ewp9}zYrA(Iah zA_mB=JC%h=%QUpy>olg9mFkn{_QwF`0^$f=U8wg5QE;XiF?(ywofZu1NH!k0hk}*d zh?ss68dk-psFOP?GgLQJ5`CzHtaH%dw#j?~mx8&fe@UYS0wN`g+!-~psAX2A(%4yx6T|ask-~q8J z!*ys>?b`7Z&)hD=QP-o?^5y3IYSuSIEa2)~nm8{F!zAFg2tL<<<@2wfRFmsUb-azg zJta?bm{S-mb3w$z>OjPlHL#9Cb^ThDfZ~LJ#D+l9z<-SlS~K_iFn}|~?Q-%7Cxqu6 zb`ujgP-Cq+g<&c@v6`stfPNpxZnhou0!(Fdr|2R0umpoO-gAFNcp=0I^4ZI5b)m@m z9rP*x{?FEv!?Locw&ylC2cJ(;2B%8a)!67SF6T~Kwqgwvdz)>D6_>+F#l3;?u(XOr2s+BWlb)>_2gFrsy%(I2Uc=I%$!)$KDAofIy- zKPCN;M=gb603wrmRP8+K#b&ILTI~e6dUZ|ZZsUZ?w0omC7)Pa{>Mh%;!*IM%Kieb*#Vw zL8Iv$-7%2{{LX_92-@_EfFt^tz*~S)&iiEL)VM|#BVh!`?{LE6j(D}tZA9_fZP@7i z4^_SSh{<~F{n<1SP4oe&zqMs` zYI(PCf?Q-a5_;p`yKptn6?J?b?cQ;XpSS z=mxK85Rqho^5^4O!13|pN1z{DTTd~-)F_K@!9aa_)BkJpB*j+JZ>;y7psO}8^E>Kq z9|US2M!VV*Sq}83E;`>kr3L{JmG0?fbdvKLG{!_~9>h9+u*DcU>DeodpE^EkYt-E? 
z^T=ZOD;sLGy@|NTx3Rp)o0r`YaP7kHlFZhLW>;RXPGlSj29rhW_R6WQY#D&m1D%KIxz^4sP+Nm&Uf})c>miA zz*fS^aGk%am@fB1CVf2%2m$h-;(0f{kF+}DotQR%F23yFSKD`C`kb3|r(R9esQ+un zO$VnT*KdB!hm(}^fNIWpYYq{zBdS*NY&Ur#?(9EtsjWc(XdMxpjS zn=LcCX_F8Dji!&J(``rBH1B6WL7JT~O%_A3+wguOg|Uj><~#r(4TxxsN{igK5}+F& zXJB7t9LcM>{L-sT;k4CwtTt89b8%^?kL!+oi@c<$pul+F4|ySG9_i`LmpkkFUM8|E zkoKAjg#yflPQSqR`29*H3_Y_THMSIoeWyz{b z+{D?vPUQm|rR1CBKY~I+5_ryGN@J|jKP_4OI1M!oC_R~R0 z`%fqBYx?CT=cBZlX$t3(UdeOaS=^-4`)gHqOdPCcd3-S($%M2-)}xWnyfLsv6sND{ z>Jh?mSm3UR%2vL)6=rSc7+=8`IeRW83bfbJ}bhFrd>U z<^AIL46hq;ZN0Iv9-$Mq4p1_<9nCc{u`9uVYoU>eP5< z;R#b%?T3qM+ore`hXXlde-c=uw$J4?2VR39j zL0!+u8TOgUXxLCq`8nTFI%Wk}Z&O!fB& z+zkiW>iH#}QuFJYGN5#70U#+9+eowzoO$sLl0!z3OGe=+yN`=!K6BL6a_n^xe20;a z#`ooS_~owa^|Lt1T_7P8uFm=}H3e6_>QKXx(J~^BTkKcyD~gXFk6u~nCOv;oPUBCr z(wdnjShyo00P$Zg2gT$hR_B|0kEGrHN>avqNP%npwbNnFpzJ*yM6(KM?hbD>pKUV? zuy3~$I+@K2ln&82mOzk%fH!En}N&a=yXv^(R@+)l@KY&Beb&i5%4x;8~P09H+9JzfYZ={!ZS z{cGlL*MpxTlG)9`GhbSdD)i>5uLrX4KYf%PqX|yuNPZ{*h~m00+a;2Ml96nu7hAj4 zy={JzysJV2%?9m{G>2HKhb_M3rfoniPzKyq^=cR;#HyzzH=#C!vw0ZI&znco*89iZk5 z+D~3`3dZi}ld_f#5C1+;&(-;4HDU5-Wv*g5Hj2k*I$)`sHl?}?0-601ida_EQYt|f zQv2jQ?%No;P4AKgAQ1#m{x>cMY6CUAD4E89c+hbT;{1`Rwltbzc1H z;^O_c=QtN_H*1>K6W`N@0mt@);sl}`t2Soq0B=}b@xzf1qw&E+y_)j5y#G(Fw==Mr8#M!+X zD|RBwHGaR$w!Fy+$sIs9tSbtO6_9HCg*(r$t8M!Ilek zMJroC^*&y7A3LuG3brtuY(DXFR-~ighV$^{wfKZCL zimhLzA$|%f?%FSe0tAXQ&!^z))kpAtL9wTW?2l<#k2S35 zyrn!xA3&USus=<$MlM&TQK9B%Yk_L;&Ro5m6;EL!H*S-lJ?JoZ+7Gs_wS zfT(MAFuNGus3^Fwsr#LwYJvKUOnR9fZ2QQ*|(e_8b}xv$}C9~ljnHF@I>y;r@nMz!C*RN`FH%IO5m zr7V!Az?Q?Z?PxqEFSX=i4O4=sfwIFT7+DuaoYcQeEak?(;XTu_^!}t{&qOTYNJ~Q6 zy#7+ocx?6??wl8VQ6?(wHg~7J$=D9;X4r%6YK4 z@2sttXlhB+0ErF+Sz%T$0JkX+I1dBL)N8Hks-GHr0g%1zdA|vuj zSvxhS`HjO`_cSHIALzL6|kVF=yPq&mQa97h&rSJl@I zX-99_FyTPsogYF%N6f4)Ld&@i57%F0raIYlYP)L4iTrCWC#!bmdyGKz*$-8)K(Gwi zS9ZOWFAqTxBVZ~|WcS_@^uA0+7!vXcEy!B!PaDYSl zI8fK2A!ij*+ziNvTIKCLtLKjdKc?e@HJl?O(SwIKYCT|kYksMK)?Ni* zk_K>l@m?JjCLA_PFJj(T^18`g-r7Y~HQ2$SCg;&yEmd09&p=XGAYYqRin}Sgdnvl> zOcytLy;|%498dkgWwZ5fU2K%%8k;}hz%r(y16h9cCM!;tCuT9$9)c;dA~5&VfT%DUE?z4g0zs{HN0Qq@L^1+) zdb6rBq^}U>Zdc<%&Stu&$H-=Kq(GodUk=s_?2KE_XCjT`aZ`tcd3>fXVn)(3l}Di= zmA%d@_Q~3|4ZeWXV2S}MRZCJdgjWMny)V5*HeHED1yp!d>;2FPS-4t$PAE{tAIMEa z_<#!*9yzsO%%kS;I>_mo>#a5(Ih<4ZtUCcHAj2*jI;e`puH(B5-ZLqp)b$ZY_CpQ9 zAIi;E@6NWWs1<3bs7iC#iF={=9Vp=#0ztqw1z=m>>iDPt#(VU2hjhS(s4|`t%7`fC zsgGftZ!@nbsi>wyOn`ZKNo#xVw5vuq28Doo&nqt(5Daapb#B%dLV^0$`nay{=bq!} zq*M9tl+rNK%c`@PG%aQ3sTDTb^5xQzOg zx|MzOv6aSaf_!Osbh$16i>{-mSbQ@z+K#Ou5C?<756?kY_k(-WCb-<-ae5Y+-FIoO z!2p24G6I-U-e>>&pNUC0sZTo6;+87WIV)cJjjZBz+O{ANu*&uKQgwo_H8~$N91*Pm z@H%yN`6m98cPe!n<9ekJgb!Hu+CJDt+NM`{_ORJg1W=BM5%O;AO6|aV5AJor-H)CW zlU>|>~|t86SGFh#aMA5pwe`r^cwV5`%P=K3=U zz*EdXV1RHvKoI@w$|W3wU`Dj2Qp*7yhpGe>{e8WFdo3Vlkdl@jm(pL0(a%U;#c6XP z)>uA1;9RaRJo5L+9HysUcc2SGLRt)T!`h`NhwP)L10JKs!L@~UKpggnQm3Rw=oUG#nBxLh@h0D7mc2}D;SgXEm@4+ z`60rcE8>iIqpO-D1f|4-GthboA{w-pE|&Y>Ofd!5``mIk%TcW}=G-nRN#CFwIX*le zXpF3*@6Fa`|2#WQk5(?xwJ22Nd8os3$a4C6iD&2ZvHzH3g8G1?}nHE*J*E={7V3@kMA7k;lu#4U7$?Tw*am_Xe&4Ypuyb(ik3q z?^(AWq3#Zf%gEsJnyrmI>b759aUTxm&Zn2C*JipQ{$GCKzwj21E2%% z(ADdRrP4=WLx;YALK;X3mci`hrGQqFaLYGYdlA7F5O7&i9VXT;_``v+!lRQ&01^vL@cV=Tk@(6vXV`UQSv7 zmxr$)S<9orl2RojV17q)mD2MAejeqcHG}lxa#LqR@0HtNUAfzgB{fI)u@+HdCE)Woojvw6Bk*%VB-hhIDMd>w7X!%=W zk#DHH-yY!CFLyLSKE`AeY{|ELs3zVciI?Rp#WVLDsP0H!&X<$sENV=oWtF0~_kb*1 z=4JN6;7JviKjT7mPS@twmFze|JddqrdR1|r`uNlzxA#3Pn-%HGdFGq6iy&AUS`NL@ z%=t^sfvjfw?^?D&ixG%(<&c8OQ81v8UqlMu|mUAcu=Fhm6hvz8QNhZ$ERd z`{VSswvU2&X7BDi1fAG;e0UA!I!=6a_MOm#|6R;#?7T->8{McK& z+8Bk0-F7m=B>OlR%?wEIVToAPj=ao=g+g!<9YuiAuoxmR-`?C@b)6P=ih3p^G^25w 
zd~-nLy@9^55BT;XXhOJ|H?2I69Xg-sj*{bQji&E_XLqIp0ov34*!0|Wts4HGQ~F+k z@AMmF$G!c9nYW$9J%L}oEbL3yO?pIVOnWE?wzT+{z!9Zf{rO?pO!M4id+5Bk4rV@y z!z1tt4`ni6+;6{=DR;o<^9q3dd^3~G6R8gXKftWX=Tq$~xAlD3MJ4^UtGr$bZGi_F zTo$WM_lfyWwHMm8f0}2xi*rBXL=f?*-&X4{9&yM%nvax({CG6C9%V`%uGMz98}!~7 zf3`0e14wzxlI3g0BekT*Uo4jo56o9>h0yCS?KW*kAH6SyVWb^?GBCZOh|UWO zW)*#`+MIn@BG|hg9rA9m-Bhc+rx4qjl!pimjV_lx-F1ZZ6O@@sf{jN~?p+tlU)b$3 z)BUB>Z~j0T%Soo@Vq(7a|&Q?(|q?Yy?^#%zpDmLoh%R~e0Dwk^)cz_XWD$JuB^U z%Dd#HOF__)#K~;+dOag%rz)<$na+u1M-R3`vx@b+0|!-U99(8H{$NYj;_({-ztNyN z$J;%VypKlgEeM1_tVW-%LqyXPG?F8@&dV*og2`^LbN)Bw6(|DLoae54s%B-@>*mQ= zBYl+z7~pY(y}@6@F}SvmwWDcZe9=^JS?TN zly>Tz$u?5mQU}AMUVA{+)n8LAGc`Fb#(6bQ0~LBCbBsD-<9!;-PUrjRR*9>l`y+OT zwF>S2yBa_#dHsC})b$G3LA3h8*Qki8mssTG&`1Xtu$ut*ak8KRwv*G9HO&A+nPhTz zI$Fro&o9<;;`;ER=J1{I-f4}slt_opaW)T_Q z?3VHobDpsikMRQbBH^9M zt9~xSBF^g$yT(ukC_&hyYhnP=&qRxYgPUts?A{%Pb*k`ZAat_~_ zcr;Vr7Hp`-dNqjHL3_jY3~bDfA`_1Sw%}tEVyBJU!&{L7`>`{~MUsO`u=jf;*)(&x#>$J3fEo&yIS{ z?b&p*G=DxEmBef{F}IEhk)$t3h7K*EZ8zx8Elg*V*{}ceK zqak*&8-4je_xZ%h-cMYuX5y^PKG}5MQ&n|!bzsM2uQYC7QZ~L5r>b4$S^U!hwsQ6k z_o}Y0TVbhUQHcbqIk--(wba~N6WpGJ7mOY`ZnBf&cE0xYF%X3SQ{M`0IzvFsC1FA0 z7cm@~Y)gy}m=k`PaU=ufgCNIZ1BYJ@{n9+5338@tb`v#{G?ZY63P^5RNZ~2Po`{EF zI0~cIn*}oh!#ZZ{cULOjM`knNmuaf3Vfmv`Jj)*;tfIT;yRPJ|JQBmkEdLE5Nvqk7 zM3rz~EQ~|qbaZ&XIqTfR+gft83zsBB!K^yq%pPOpU>k;&72J7(2;zr#*5ZJ)Kb^UQ zUEZA+{zm4=4ykKza{IRSFp)c`-Q@U_Oq|Xjtm`E`Kn060=+ghKnQW|m+dZT;>=Yl5-R1Cq#}gnx_e9`!eL0_aHJ zs`B&HuCXCj5m>lI?VNc;x;r2Hr!C{;E@X6}@0aa`=^UiAVfUP$pdwL@3WG(){E+6! zi8yTx<~7qVn=&PvQA&j<_XGfAy<9E`q`-7c@*0wY^fcT=l*DT4q~Cr~umcM=sGavG zULDYS$Gk(RGAsrs0uGDpH`trX6*~W8L`0`)TB%XCJJD&mF1J}<-X-Q#A|X@~bpQ^aL|}#d;)n_XBeWfx_L31DVQ)DZ>KkNPG8Pq`bUQogr z*dG6K72tw_Es%maffUg7RLH~IdHvGqfS~dYl%6LA!YtIse>y=>%#BY6VIh~(81oiZu>Mobv9sa;Z z1%0Z14a&480f$S&04H>3prF1%kC%Xaz69FS1E(KuO4ppT`@1=C1o>^@P16O?Yy*%; z43g>S>qtH*`13pd4bUl`SMPR-gV@n^jK2}@zlroulSP{jBqrSuME9l9LQla!cnch@ z5c|n54Ra!1X!^?cb7v!3ivL=Qy)gPxN1}8T8OcC=(|_iLcSHkpKllJ%dYo?I+ld6F zHmE&07$ir0WYL~~t#%lz%!o890eTsV9{R;!{5&LbS$;EL*9u<8PDF*m zG+&8|e0P@KS5uY22}z~?PA^w+CB_k{7tw}V_0A)6#D`K0-fDC%jN&=7z5Jd2__(|; zQg2WKWqZ{fZ=YX-URc0plnAMs^o!q4*vLGfsEgSJzJb#-kE)xVZzQ&{^ z1#b&@S>50=E|BAd1cnYRlwAJTZ|npXNI=MePNJD;po24ZOXC7L;7R<;_N_5kNu$K{ z!Vu^ax}hOa0vo>XJ_rXPE_WLKlJVLQM2o!FFP{@#M5Zx*F$Bnm9}Rvylaw=dI#Pc4 zmGvH|K-#_&pX*k@91$9)$s-PuEdHae?91#*6{C5wQz5Wj|6(;hPG|0ZUx0LP9>Uog zWK(&uab*e97lBWa^=E2ZReQ2Yy&AkLLGTSpzQX*t^R*~_td0fRa>k)@DW~r2w+lpJ zxjhtLXrYRK;=#)(;Nx`6Y=(E`c!k1=M>u8;rNqcSi%5~`{UR0zcL%-0AnVR42@KjP zHDu^zu`1Yor4Xj^z& ziZX9gdF*AB@mKCmz825WW$EGKbKM!QIBPO!*O>rv!jpW#(fLJ-sP)cbv;5}wJzgY8 zt1$TadV?=pFDvMCaAiig^WUaGXavCdTOAO8t)=iM5l*F%Sln%3R8rx##H7HXzI!V{ z8xrVjI}(1IPMCU%fdFEWgN|<>z|4+!4ZyB<1ypg}!38k_!~gic>*DyMM)bS-%D zcluYtY=?YQtn}!UPS`083bwUF{DLI?9niu0-uTZ3w-B&{r%_T<1e$M9LPqjkzV}@x zj_a!BU_)U%SZe)MCor*S2#@cU!<)f?rhG?Xh&f%}ptgOT7r12Sz#Mdce!5vjisv}E zdKhG~CXCX-%NM%!Tl=;+3{!rhCoH$hS{CrrPF@flD-wmwpETiHu*5Drb%d+QN){Em zVS6QMrRjt8YeoPWBz+;q6aWKjsTkui_kE`{Dx<_C;PV(T>D`4NgAp%}_PckHYj?_Y zzS&C+#C~NV0iGnZ2z7r88UX4~UMFpQa)0_@8;*xQKHf!vJ>tH1D60LTHERe> zc(Hs^-|ORRrq{y-;ohjX{aX5b=8s3U^W$b(UeyVlh4#DbQU&w&sxEB;0mgjLAVcyi zZFnk722@`%tpG8(cPGEtu}GczXJDb*owR#5ck8nblNObqt5rYM=j1sE`k&AYA>Gj* zQ4L(NrZSM9bbNnH#lHyMqb`BvN#E?FyEZ7ghcUsJOcnTG%A%2C>Z7}$6d8Hx*=5~c*@U;Uy5N+pZxoca?^ zA7JL^V;RVcI&2$DLv(y1BOB@7hN-YWhHb+b@Fkev;}GOtZbB^;Sw5cX2X{=n+0-9> z*_-9d?TRAufX&@|N8AYKBv}-k3+1iwgQYXlewBt~Hzo@$v|2Tt!y0-A(NK-wmlv~O zbjBGXSQ!2Pba$h?8%ZLtOH?Hr3jk4q&7ke}Q1qPog(E_qhQr6`o$ZR63#Gi)(Fzwn zRb*_$5g+^WDdvl`6x(<}(e;ybPPk0fC#uh3M(Lk;T&*uasLGX00wXs 
zgy(@0mq&AtXAaN(yy?vE?_Lu|{9E4{)Z{iCK#yL@!Nw*d$in)nfk3I^)NVLeXl+g- zk7E`skMHN71LEz(J8Zwf%cD| z>Jsh?O(}o zwQQBR9x{7U7I|QlY9#xbF;Ddwz&IXYs!mjT`$h$6X>HdO|VT`5e&09c?`) ztR8P1&Cw^_$caNU%1h`(@t>cg5S)pX9OozsK2}lb^?)YDUY`SAM^#u4#Zgxq%h<{V`Cq5V>=y63LLJgAfLqU{%$2MRiA+{p2Y;^Z0nBF701UvvjL z2aGAC59fZn?v1v(@Hu|xe@I}QJ`azL@V9solB(g}-BDbmBPIdMJ9T&NY7YG0$Rg4h_F11bHu5? z?50H0UYhtBfa}TMKaB>&x84Z4bWU`GKUQZU0`60JGHv$;iK$l{!|scIM2hG z?Q*6uPJs!D6@wbO+Sil2X(GZ)Bzb>uTMOTGB5RV71Uz^pV0D2_qN8H)(^18hGRnlU zu%*-T9Le0eM}r;s{YrH+hH}J}%$2&WWR$Sx5%_11PDhX8O8yr<#aRRcf3UmFj2@|> zE}Dv42R_gX!n+eDFKPo&sxUV5o#eZJE)sRZ2wL=FsxcKS@B4Lq%f$TWK{>vwAPMCk zeW;JKT){oz9C;3(<@E0zp;|BRgP$GG4&uQNLxBnn`gbGmrUorJ;)SePJ0+g7HYiv5 zY(jIU&{~?wcw!>XDON$#iy#{&0=;-7Y?0A1CXOuL=lC2aDiw+6g(97qPvk%Q%8$3@ zkbFe&OncDsW%!HNIf98WtZ;98Bi`T!@pEBm=3$a#zSFi?Cegr8_`0B4BpLBmsItV8{L_&gUt7kBC zc7qFhDa=^cl?dRIQNQ7Vp4_rvi_vj7VF`u4HecKY*2($*JceI%3%c8sF=CX7{FPXB zSj{1g1O)e9L3~Cn;C8b?i8G~?>qD$M1)pXV& za>c~E0dMh3N;pIO`TD>diD1FOf8qi;XIo0R0d2ILyqCl2=WQeRvEhAdJdxodFXL}- zil7@3S9)%a@PV0+HvDD5F73;$V;TNsmzZa9s**vnxu+Dsmh?7 zQsh$e4XRwR<)qTjsKo^x+>~9J4U!KSCE7}h8@%-5nmJHkTdcTYfdiEQG+@YUxKN?M z1YR22#H_K;&;3E6rcQG@*<9WROYO2Tou6neG+nOLt2k z7Q(;M-QR)C>Z6mN6$+a#x@pB*QqViGDME$I`Y!;l3k@zUK6@%7B8{tf17o(^mffo1cCMmWhdr*{Eep;;Ob!GteaS<>Q2&pX3dHep*MKNVxCX&x$J*QVdH;3}}SaMYV^+(x0WoyRr#F^Ejrt1v^66ycOsm&v2A7p1=i>uw*I9no` z6o5VsI2co{u%#F--5uxbq>SRE_2MRANhD!#cWz$AwYNEKO2mnLTynSixK=_ibnb)M zdb^V7L8`~=_?>|xxV!3xSZ4PjVS+`GYOyRC;n^N}6ds`8)0+-O*h&R}w(B>>+W#6j z1KW8(0pS>>*&U(Pck}DynSYtFcS)amDoC-+@FU>}hptpvsdB%*__(B%=^G?`czf3^ zuoy09ig=@|RwD5qKoa|yhr#;&zPD6a@#T3Cy4TH#5||kzD}5ca&>FKfM|@U;#%AFHulaO`CX@?~zZ? zZL}Ij&}k2_8=HCa%GJwZWPp{R*6^N#6t=dGzk0}FA_2dH8# zI^g90OHIMxk=E2g8pdT$UmmXfsg9Do?aS&IDOZEmaR#_>&RTsZv9i@y$EWp>ENlKXx_~Uzn zk|UHQ!8KWe7(5G zlE!17vO$LeM~e5KVq)RsN|yg}JtlL{(~+6a9$GMxwzMa(tr3{Aa{x~f;)FnZNow_e za_kj@UC|WsL>RGOkUchND1pJM-V4B}AQy;xqqfzuu+&O?<;GWUdW*aK4JA zFM1VyhRLHxYkJ&VCgQ1JM_mqJ_Ihrl6=;mA_pf)o&0{MwYHPAnF`W(}Z(F?R5#7@n z+gZ^eYxTBjDl`CfX3$*@J^mg=H=Ai>7kxd(q#0mN2+k9#%20JZx9P?rTmd!BYG#m#-(7ZTC{ozy^d0M=wHSD#9oqn(_Q92RFw zi`{vu?>ZQQ_(#^Y2*g=DY}gbHuhva+{nMb0)$uBJ@GfqMWM+8fS7(l2zvb#{*U;|q ztEi77nPtEB>SM*bdAH9R^?Tkmh{KmO3?7?n^8E*vR!twHZr6AcYR`aON?WD}ulpx*sswn%kiXNp47SWgJALZyR zt&iW!6_p)nmA97J%&xWT`J5i1!OVKTohN_%)7tKG{8)J`JpH}Q z1^b5Z?DdG{ymha_sNHX{8eveRzNLEG@#RtUb(dVE#dkNiG0GCti|TG!Uf)uCOuyRh zh2^|)ufTGB<@fW3a=lSZHcTK^WN{q1_2)xEN1!7}+7*C_l0;-kJ;6i_Hi43L8N}dD z9o5IH=PfI`?+b37Us0CJan=G)g%wjkSr`0He!s20&vJ2=u1ljyJ z;1J#yW=KKLlFM3tSZ}{=jA?iAt9Dx&b$?yIYkz|7%Ac)oho1H$>UH%b{X4RMnh0on zzUddaZzg{^U2iAWom7avsV9F8NcovsNhuS!W7>X|S5LCw^Y`NXU2DMjrU0sC{u?61 zvwSJE&Aw&_ufs*YfcvdEV%`dBP>4nk82j#A1Py3^Zj9s4l!#VLz#sygRvb`Z0kL0R zZ?VOwtjM>3xh{2TGx!ntweN4@XY-RH-JO~_MRy_n$iHBV49r4Rh zEt*&wc)7!y0B;iau?FT5Ralm+w;-CX^&-g zU+s^>%Wa`@11r9dv(i-*h!OyURXAaJY91x){v3QDxT4wmbbHR^ZrWw+jtEqVS_V*tdoKhBo&L?PJ)Tza`awEoWHdro4SoM5+4 z4QfFL?tE=DD~5<$cx$nM(&JJan{>Bio!tUafpv{CO2vR(Hb zU6|&X-?ZHBT04PQtUn8cW^lY- z5-|1`2o%X`J{XU9t) zCt5v)Uk;|B4UONMjp;aHfqo3W*6Wcp;Fr_l3doc|(-=N*BgEW8Y?=eA5Qi!~-X;&E z|LjQWD>hb)ePT{he_EFBi8SCfUifAD-N~BApE%L0S*X<#XFZ+B8ZNh-#)(d|;V!pk zwH><}{pAO0>e9V;a>#}67f=sNZ-^@zUNJ@Ms@r3K`78@rE-u#+SNxsQc~SfkZqk7$ z`u8^J-*uQ(dGh-50o7&NxN$R~{k1b?bopypNc(eN*2i{}RWFF3$uhA|`_;g9F#3Yq zo&8NB09)FDNV#z?bx^{p7C|U=H;b6>gw6@iIHgMJh2sV;aVrNa0mO0VZA}w>Fc@ zQ4rwf$&`o@gE3>?E$eZUF_8eN(THLFDylzU$VvLTE-*KrrYeL>SmXv}J>zIZbF&BwagT`LWXH zPtb#$|GK@%ue*b!xYj>AW^6Sz=_Ok5tnt^ncm59*2FXc8w;P17hdJ0Yr%-JeURel3 zKK&e9ye&T!4krPk(okT5VwB+$4<*+`;~#rrSP~#6jFW7!wZ(vw<=10bHqmB$0P0*y zYv&E;#sPG>1hmi3vLMQQ5{56~!|z%DIa{COi8V9D_hWE@?e|fy=zwkI+p~CB7zw2p 
zx!_JG!-L5YLlX&7G{o-Wv(I@q#FHi(-Vc`T@PVQaQ}kb#DGu${9}k<)?lY9-*S)4! ziIZ6GBr%_cnbOwRs=XgYeeWu#IuVX5T2DGfAB$Mh>f7&!Dq=hhMR(SCSN)bPMP7-m z4Z5p`2}GVYJl7Ulo@+;!6T^c;75C{XR~+@6Ri|`bcPvF+{1NWg>NDkH|Mph;@_uZ1 z*<;4F;^W@$KNXc~>eAcV) zWmK7s8vo=Vy;ux~&X#ZH9&YQ@Slt|CiRCSMiPdx&!{wT%Y3Qw(dnm1nGE<#C5#n;= z5f5{4XYl`%ilNBB+k6N>Az%HL)8)bZxJdMRADU;we17S>`-lW>We<03Z`?U605r$XTs0XIM~F?1QrD{ySp z3^QXe`p->39W&uOLdi78@rrIoyEP18e$DIt1FvEqUjFmKL)cQ>s;A|p-|*h)>tj`= zRw38lj<1f2Z6|BqP7SSo7fe~@=~I`_D4%t0k(5;-=I z0*`^E09MmeP+FW@!2s$k>GR(HqB5sKcFh(G#D@L#cp41+S#E#L(Q*b$NKV4V$$=;# zYBI>wA=pD!E-Pgf`sjesNiu@L+*}H=Dj>=mU1at*RymkM>$}pkmdjH}EMOuHl%v9$mNE*qI}06Atrra)TLBKHz22QG}zb zb+@#|N2C@RSX*lfutUXp@V~U^`}t*?@cmD{bhp3#l`|zf7X_969s>z<^{uZ>xzCQ| z;3Ql@^HsWFy1ER<)!4#XJmR>NcNs8`cj`&<-0l{L{n-98H^#@W%MET3G{|_7$aF}j zqW8}2w7{PQ&w$CvNo|7x07n1Ea@$p$C?Zr72{G10RyjGSzh`v~3J}%yxpWGRxxsX( z^};hNfPg?b09ou3^Gh$&-kRHEGVsgiQRtsMU^<#^m;xd7NI_zMgiAdkS>~}LCGO4W z17aq=q5o+EqNIoP*Q%W6vi(h@b~wP(cO&e#W694iX7W=dxOVeIr_M;{=s;=dN5ISM zuR$E4E>=4hx~|L9>L)Uy^u7zuukW{JP zh(13YQ0KU)@9g%x6tztDcRGOV2{$p4r=})^f)WHDC|>QfopB7af$4t?1N@!!;oXD& zK={axw;s|XQK z?DK{Zuu+Jv6Psmnh?bE&?TTSMy;}bc8V}?AuOiGf(1JIq_FBKajs?>4weAlgwZGi| zem!DqyKTmN8We4NI?>O1dN#UpITQ80@fp1Q7iyD{>6ii}vbj&CqRT4v{HITK+Add? z1MZDp*O&P|I_e+YeDi$yJ83EUV&s3*{Q8u%g&8&z|K`h-1G4<520Ah0U3|r9<%-tB z%jBcY2DxGGBX8Dyu-e=}M2nw7WOT^tUC7tA=75f{!MMuwagFupYdD8!OFG0G}* zpIVDv_c7aVW;|au`v)uiPuK2zclg%1EH|%*7+IoL{a~LZ0aHH9=XIkO^XKa&@_^Sy zolGBMz@h1R==H|YsMNOjaZeo zY~8|xIGm9XgEyc2YFIHZyEr`irN4ga%0mlN#L2**-~msXn2*y)0U=;EX5|mIijQ~g zH#ES?%gar-=(5|x`iDA!;k>HX(u5QH1?_GYQA_bD_{ccn>u#yfa{sh){AsgN)wcM} zbr}cqO`~UWzTQ891qZm_^7g~eoLaShz%gux216-WgKG}xVP zy{_EG>`X*Yne#?+?^D4_M3X=Nee{2ByQ4)_klB(w_gP;hZhsRyo8P`G*bf{%U;bM_OPW2@N|{=DkmpNkXxdL#%BZ?>ri;Z&%((Iwa-mrE&#?HRhq=PAK<& zsho7;7VV!TDc!&Dwpa3*)W2B6Gb5KEIV3>*CHa|3Z85s#=fWlNeMrY_u zm*}WN;j5XLf;D~>0}K`bgbn%w*`>VD-6oN^x_F+7Sd5JA4|%P6;CP`Zr`LZG`&<$$*LamB~lvn6d@>*bcv--&FGcO!W@twA5u~|gS z+R?8xcwtc=JyOFr@ds0|v1kHAaKi(W-M&)Zb|?Ca02C&28m-U+MRMP3Gi+}%XJXbWyFB}0#GT>1Wx%Yv{QoR9~QZ6TyKyT+CH+pLO~#GV13=0kdF9VRsrbW z>dx~y+6%7D%uRj1{c&h?M9Fl|T!xtPSx<7du-Q==%}Q`h_%D52!1>`0lH&UjdBXRz z__Im&jwf$^J&Qh#dNR@F@GGuncK_OOJul*e>8LAz9CK%h`D5mCXsKyBQJ^ZQy7@Oi z?w(Q>tTqt>R}jK61g3?zat=Yjl3_U5krdN>pZ=Xw9zh-knKlH5Hvdu*jHSRpVWIW{ z(2;iq0`WAp)QlB^%l=@s_cKJ1>xw>L_D!ox@4ce`@ZP1L{0pcnJKtHWupo7ygz4?L z%Vh-DeMt*mOeH%wI@$_~T*6$rLa9kT&Y9av^lLw<_bF%a*b)IPjo$soY4soLO-O;0 zMRyvW=3I)XuTS@-?uv7cy3H0>v7#>{^N2t7+D%Slp!~*pyJ^w2pCfs@HS0l)D$4hC zc>Sg4xxMLZX*h9L7Bkc9a-EO9?B$j?K<`aqmCNW>&tB!zBK7Zm6;=Og&-qQqr{}mj zyS0R$M7YbJMt&dy`n=~}epk20yaov*7Q5^`)(zq2;a9P4-~v{~VigSuPKv;V02)yk z^xg|R{3Ra_Wl8nKqI3$XJ`moo_4m>il;62wu|dFQekw)~_{%(&y**E>!eSQ_}3fd`cQC)fMr1Uw2 z&SIh3yT2Rr>G;*_uPgC6Nu>ZY0c*xpZ{^x+vZxP@z{&=C+uzy8B#Z!B^5%1$#2C@L z3nWB4VSBwB-3SamhnCAtq{OD@EPVIJkFJhJ5P0#_b0(z&IE*hig_J5y_aW$B8wK7J zOauv?>+&Czi=XV}8H=B^sfZv1g|J^$G=y+?uXCoeFb7_BtG;sbxUZS9y^@-Z+?eC^ ziUVJiR5tWk_emXv8+~UbT)l~*e&8Q~nWU72F8WT)s1RU$vrFw)lu_m);|}Vtg)x2( ztL;7+CEKEpMF-|F9tKw$Z{mb+=i{L3x5j)^lLSKu{IZ(@2XI)z^uK8YGs#wxIpYB8-9>yK@-tp7+#Cv4QmWck@L@#4Cy4-1Z^oLTr_pI%A z(`meZ=G|Qdu88MHP3Stl&&>fB0c7{`rEGRhk8?tzzQawnY(7vWQJ+6$4;#%~{u9&x zB8Z$Vr_9NB+dI@H?JBs`TqS%l1V5TCg$x%6&NW%9paj@Ce+tpUyCqtgOkgsa1oM&E zV7jGd7~8A~%;GZaZb)?;S(EiS-#h(GeM=S5!r%Uu^z=NjywS-p;AU~S!iN1@eJA^D zL@vX1H*$m<)PTOxfq-FFebcvH;TU=LPMP~xpT_miX;vR8o15X9+N_m2jl+yW$y?83 z-zsAagX(d#&y8V-K(H_Z1(mJ=Yn?q!|UubN0HZct`n9 zek&rJXd}UoGV;l4kS^R;hAdRvz708EOO99R#Kcj6q|R4kSi+E7(rO*vq8}L z*SLm8+qOj@i$t1eP>6j25CB*%OTL?{izSZ)>ZS$d&ZNWC^ibYwwchTWu|`2}w+Qsqr;R@TNw7Q`18R 
zTuhk`3!gT}Znoy+f#hQ>z0sVZ>msD$IRTI6bd%iYZYG|%<>9F=HJRvZe=8U8!3P4Y+k9yxR6V_=A#_m@XMa?vIiC$IX3g`gZ-7WgAd_OfCH zhlLhaGM$Q`z?c)l(FML)S%KyvFM{t@rr2BOTF!`AqW`!Uj%Z-Er{bH!3Oy;suR4L>?P(#I5I7x;U#&}SQoY3W9`;N<}VsOfo9o$C%AE;Mne5J^?>iZm1 zC>VX<qmO6;;J zhZ8+HJ6Nv2lFQ*U^c+<%hPcRpr6uA4wNkE==C?@}HWPh1U^L_!e*Q>kV)u(cluh&j z)DQNhA2l7mzRX~jtsK#4W&PmzF5?Dvtk4Zu>{Wl%iTz1&gU-`_7wU8VG!W(|gjt_% zQ)%FSKg$G7XzN>;lAsZKyQXO5GSWtKQ8vV0Z&_UJvHh5JJDF*J9Y#g}ZF?CSlxSxz z#8ut>_k;V{DgjE)_CoA7$%^Y!Eaut(En%eV*axb2G!a5dsZa|lpn#|;6qu(^2#Nk# zgHnXU(qq=Lw5_UQDDbhZ;jS0HA|GefZ-L;xrByUFR;~q4cCz;|>j_#;HWnU1<5%Av zvltpYgT(;91@?AglrCJ?&ql7o@zOX`c=!owpWSH7Qz}aZ1_ou94<_tyh_C3BU4d)} zPIfe_0cV^dEB|)7x&lKU1P3?6f?bJ)p`hu&=M8N%a%aH_Kfdtm72n!xLR!Wax8w&T zby>kmj+7Z%$oFZ%0&&uQET_^!t2M!HC#eMghyM2Hs}RM(kn9j-Rt;J6qyBIVOUJ+c zpWp*2&8&b=Q>K=s&+$)+ZvZS-hCsPOW8>(lz&_^@vy9sjY+2{C0b|;rUB(!~r~y7JpmXZkHyPg=VK?sgz6- z{ii@^X3Ith@F?*7a1%fxt^_o{lEjdo(3d;Mp2*C`-_ZMhj;VMVjXSd07Q`-LTut?i z4XuvNahzJRZU$jXFt)074|ru1{w^;700MblYlHO$6-^^qDW_HYOU*)MdesB&&7R9Nn2$=0_kyPWT2ku^l3=VQu zVIXdQucd>iy@k-aQG{Sk;fU+Z4y&Fz2)OSx#n>7wlPwDov*`*dHJ$w%b4`lgF^B^ZB>54O%jmO>s=n?1?0or;ckOG{ z7KL_mAj(_qw-{KcG@2n|`98Em4Dx1FR#Z2DCOi**)_b;obVtC>q^ z_)MVl1ebgRI$V|ccq1y~={`-3L_@mNKA7#TTI}iJMN~2zV)WulGfsj6(@oGg#G_^X z#;K4d)Zx{`L;vC>GMoy3oyb2h9ZTR$HC~0mu0fRGwxM}Om1Zx5po;Ss;Z}x4N3>Ih z00;^UG}O9sg3_OaBXPe)NDvvBu;a-GlR~ug%GP?t*NTP6l z(icoQZdfX5L_AZl`;c=`HnjvJ1w2efBl~m%s3o zAsRyhdBQ%WaBnW?1}RV#5)uBXLIX%Hh?M_Z%2Y9FG}Jgdwk)D@t;i53_p(mxt>5w3 z%f$3~=}F>S?7*5gqh1iDo&-ijRv~kQi6;&sfl3B^gi1&?el%%UWwZFgO2->c%FPIb^{G2w1p(CwOEWV~yftIjOV za9*sul@F{KiCMmS)BPgTvz8(TTk)6E?Fo+{=X2#)C%OIFK!SQ%^?@I6Ai8tBQ_|7F+rc56#4lOhhO9yF|x{;5J` z%JuZC$-(1I6w34-#t9^+FZR=<^U5xSo!5J@oIajt6Lz9>kqfyf-L(9$q%Y^XHgm!($ zYW`}4h1FhP`MtU^gaajzKVkfHqSTo!iMaYJp;;e<4 z84s2q#!bnFC=DUM>z`&vgQHbY4PN z_xn1}BZPuz*M@~{QU=P7Kn+TG9Svu3`~DOSCsrnWr2llp^|#AtX~oz!mMq<94rTCo zr9pDD&-oYaGM(>DscwCPI(Yl}Qqk0_-$WyN$lI>SJs)EXSaTp69TDgr>`E|3RhC&7 z_=u9Ct1!TnGeMB%*c)1<4<_Ic!EoGJ6PJeq6J3VC=vU(dr7rN|j}dy8#q9AJ(Gh75&3fR_ICCX6+)cnkL0dK7C@9lwdBbkFqDV<5vzw2_a_t4)4#LJK{1ES9hzz2j2!V$| zVZw7+n9)jy9bxQgMaK=WP)R)N5TMlJq(rdnst(-<()htoK+OlurojI)Ev@&AcSozgk3yp>P=9w`A8VN6v{RTJ4+| z*Lk`Yh3A9X2bkeqeiPhHN#YPDRk>t22TGJ{%3;D7pIhgep)GV3ed2%wPRjwY%T+X6eRM@!EWv2sLQ)k`n=dZhFG)m z!6kIV*fR@*N{9U@wJ|78v1z{=Erzh-GE-3*noAbyML%u7?F z6pMKWx^!xPcm@pJ}arRFg)mFB?tUEMcyx)LVvNYw) z(45q9t8o*$I{EE(Ba2_079!9St`3Ik%wfoyp5$&YGYT>x2nZ1~S}5)sX2=tZpNbN9 zNsVkmHl_3HO<`N{*tY*ie~BxwbJdA(o+lYIJWBP8GpIbfFm0?}SqR*1mq z5fDNHW1G{rHBRtoHgBrXo`PSx`6 zO0S>ZNk6V1>A$Wu?Tkwpjigo^%)5S^uL>;)X{dp=e1Wt&ny$iA-S)=0zyAJ)|H!ZH z?kvlsJ*6>}sPIYF4b&YD6IDC5U-FBA9Ky#PeN$%{Y>=$2uP9KYpf^;<8e0DxG+h%N zlIWI;lE?o~sdVD7zL2XaFcgHu!!;A>7z%lgwOB)f=Jr~1D5+y;ewLT`*&L4(+Yp0P zi>|qpPLe*wSL(W-wk%#9AtSVGq}VtcOaI(vXU1y^YG66__pF`x@n#pUCc~JEfWR!! z?BtNC#z`btjf3(H?X+9M=i;9jO{Q?&`Hzu@|Kw;nPdbGm2@Yf5+=Mg1g9R2yJ)YkT zMMVb3{^_t)-@qMyUtziLq8)p>s2yt|Pdl4`ZC^77p|Zj5%TY@~K~e88D`&7!QTDoz zAz<4bo5pT?ny&j0RX%eng^i!i*xQ(GfHGdoT6^|=j)-E8Ay0Rqj`ZQ>P;&C5R}2cz zZN9vFt~oR8Kq$aNDEd&G-m}O5X#xq)`Ymp4#>hOcZbU~uNu3kAqo@pE8Wh4ohp)Ky zWYn`??-F@ACjpwyRr)hxXlFJ#JS z4UvW2H*}pkPy2JwlK%U+gvtpbv+_KKGt{`EC2{D+-$MUfNl)Su>+B&G)x zoDN1Vwp#RmLZZIh!GA7`XcIRrr;G-hL3Z!2W$4(IG2QGuSJsq1&bz#;FQof6yz8Q! zdCV^%4;Uv+)<@J5D4>u{NCkGhm1+3nH?w(}hMG&o={25`c}AdHYgp*VMXHelp@mdK zjz37qdqlX8o0c;cNPx=Sf4b{3xOvF5erzhu(UtaySznfiw&*4#JVey0EYWCy=mdat zf1UH?sp-$v=X1^SevDw>J=00|L`7?PMFo9^61t1G83uf&P>T#_B`oL3k#eG>Ma?%4 zx^q8RUB+0iaV{4?3Oo6;XX|t8C{+DY`k68{K3W5Jf@d-=rL!&fPN~IbIft0fkb^*0 z?ozhTFLVy{Zk! 
zX;Omm4Gg9*5hRfJmAq3flOdsVGEK(zcFAvte#{YMeBVRzw}=gt=S$Ui3nj+O8i;kF zfguTX+a_&7)vmoFdfnC8P7-p1K&tlyxX|g!x+@m4vZ;k7(FoDt^u885KCV1DIlUUS zW>*sQ#}aw$VQW_T3e6AjCo6F#NF9G4`feO{iO-^MxVX5i-+%=@(#m$m8Bf+E94lox zdodJv{lU;`J7DT|W^RIMEqcEN1R|JB+y-EZ348 zyP18kyF8+DH6n)6hOKT0PCQgV=7}hjJz@HeOSccrm94*8H;7j|*}qFU|MEM2@vO_x zg+723A|YSEoU9UJ*|k77LyNx8WGM9TfGWf~p+jN%lOvYfO53i?y_las!lmFL0>gll z^^Mx(vLTwY)F{Q4^H24>xjiW;d4TS}h*$4ozYZ?u6Op!6Np@UQJNFWaMp>yY*l!dV z|4lkxou4SIISrF76Ud)(T3Xkdbuk_W*31bhXCBT=E|bX8e9N#4F>IpLIXp~?l&2*v zU}lUx-w|Y1K!d2iUnhXiA(t`%AE5~kByA846L=(RZhP0(YBwGomD!H5@jH+r5m`=(n+M_eRLES5~tX<9{=KmRF37hq>Dgnxd z^f?Q60DTNVh@XlolTKY>6QT>O*-Y^p9Il$qzbOA4$j_WwjBfVWErugaE+YBREPqdW zk1WbM>W}>w!ppY?kjr zGXQRP*B1k8NkLg*0>!Zel|)w&7=THr8OOIF|Q#bCji;|l%K zd<&Hb6RJbTopoR9yE(!*=G1yA1;zrW|NW0FJn`MNP} z=<}*60C_2KEtcbRCK>SC5+0e}XylM%2_C5`HQi`gbfjW3h$12ot|TkW%ARA+xEzE? z0bKmOv4`OnGZ1{=P6c^iHHqi@2c|Fgt8YXM`#-`p&;~8M6mu9c7@b-9JCF2StxRrC3J>;3Fdp64~(Nx<#he|L#flq-ZRqu+ReV5W4^(f}iM*f;qM7 zb2G&|8I2p?YSh=UT*s4iP&bQ-z%7Pt;rSw>Q<9S=*UM4 zF$c1#(Yk*fI^}qHSliGDomrfIuU;`8s5nq#JhU#aor}e&#egyX{JcW{Vn3WoD!cAF z&+~h43~Qu?lQUY#1h1~vv;8uv{;!Z--CpKJ*Z)>X+cdbN?vH}0x7L#LH3Js%m{%^hwg;zOFGITcgH9C)50h!l31N$TBcY$5wPs)x<} z2v=y*bU8C^7dM#mt)Q5EFxr!te1RldIXjwJI1ILqQQKZeTeqn2#Yio&4m40%S1cv$ z#d1dL5K7e7yW2nSu1&{~qLdM`eV{9FVo_w&SFlD-4r#K;XK@)k!svd8i;W336h}^0_(g-;fY=2xi*|)}%<24p znOSbmmaCEo*5Lx5#&xF_SC`z5)V51J&sLn);TbUeHy-D(4|j&2y}fFogE5!0lBZ1C>D&v z?@QzMmt*h8ED9RANJYtSWfGDT+!6{AesQtWKEU0&PB88y(yJC8fb1el z7D|7GFEQUhGLXkF<7Ppjs?hr_%na|??)x-r>A|MUYqokbT*#v(l*xjRo6hC%!!7K} zFSj4urw;D9cX%~-J>GoRkGlG>Yw5A()m__F9kNkj!PxKdaoNMJf~KH`+dkO@`Z#%= zp9cSZZL8Z!a*-zs^lMLV6pxR~K8`0Of+$PCrL&;`fLHU2(L^pZ+$=0yk2<>$rf*@q z+bIf^hB9b{gDws-(Q@g7x4>hR1<QNxyxKX?La5nk&_J^tSR zewp5KcDrig5wLi1H^oG#LwiZ%&7U(ff4;V-pHLFrEw@-_d6i=(bbP#5_*ODkyrtba`X34qh+acs*o9e>e?E;&$!03&o4%0jT>u1AqY>r4bJSl(5mR%j&Y3;PE&-x#N6vqT;L*lPm zrTTlm#m<(c3^j_4X;7ud!xDT(C1qJvVi@b`^y~ljFTEF?XjTM$NEy1$+IQB*i=*Zr z%S4!@%K|F7aFD_>&@X*%64mnn((O(b6GzJ*!c(=({m#s^+H;Ar1TmGQeO@}=nYKPq zu+dTJH@;{F%FFUy+@3DRuPn;l|l;5J?v=|1__& z%oaQH)WFP}feeB#sh#^kh%SLH77XQ1bwL>jngOJ2fQ(cd^sA@rF4S2pQrSm==`tqo zQ7VbBNzt8&-Rd%S+p+=!SmKOz4sA<)~5Mw&$_Xg)tYuI0AOp^(&{}4q(4mU}DL|XTo~Ese+>dpLKfVFR%E-KnWq7 zi`?JC)cP*1@9NCRAw*ooEQ&diVg@AGV7Xl>RqBz{ploA@Jf~16(k3f8NJWU%4(Sw%SHfer4?;@_rwQ$r!WEuUt`oX z@o%$%FJH-qAvRe(uALaOJtkm`;ten6Afn|-14!|KTE||`GEBy}L88Y^p10rNM)qG0 zHIzj416h-JF-X0%4VV-~73B?DA*VI?zRlk_Bph~{%DehIs?ad_m?kl6TB^Uf%lXyv zUnY}b!tf|Q(>h0L^A1e`F@InO2JfUf&I5DeUUPgpS~+5Z%SVU8WXb%BukmG{D(UQ5 z>yNIaX$V^x6W}cmLrckL?w4UJX{n1(A}V?pyB5S?Lgx9 z(B}jNpA>Z1XH&!6a+v};2?BS?nSUL`GA5@Pn7C5kMV1(yuZ#UIMa7zofLkV`wF)F) z<+P_*Zp~>yl@%EE5_lknTt^^OYII5v=N0>bavfbE%3XS>BSR)-$->DJ=dpY+*#_SK zP_nhXKA6$QtjaXQlm`Ts2g!78ezG6cc>BUyJ44B=_UDV-8I_2b6a}eI) zD952!btj2!k7;pD3O_!DAhrqD#jww}yS<(@}K2ro;hkDZ84ZrI@g zfnvebSpdp6P3=ZRCHq`uNilgI6z3)Fe)=mz0m=+P%x>_#FJKQdsD3hZG$A3TtT>@T z9tEe|RH7OPhbllh<>VI7Grhz+%i;_X;Y+3vK3eBmy19r%+s<`E`CNPb^~Y*6yl89> zU6VR4!r1TQ0K;R~OTH|eE>H5Q-4te54c6+cn>Y(yN*cHD=vrqtTxXs*!P_bB5T}BV=WSVAUlSP-NnA6mq zL_d5lO)QrsQLiIRj*^RJ8E85!sqDhI#;}_T%1gsj0%{yE5F)5%N=Hg5j=oQ4rYPW( zdbTl|#OrpFk+%5or2fZAyTY*>Ga0+H#vsu*#~-Z5iYqzH$Ym=dFMs<};0F6&NHS(Q7{r>Ke-+oW64VDG|Y z$MIP&96szS#v{I(=LEjH~KZ*0k74#gyH^fIg z#Rn3)NrG6!vW?0wG}QMy*z9lP%Tx=nM3O&Uv4n)@Ohb6N@~U=Qs66T(j;-UfsUnXS z3j7BkBr1sM5x2afi!RvuXY;`Rqv9PyCvHnUkKh}vaj@Ozj|rb^v2t*fAmx;|c6POs zn)pnPfeYxy(C%Y_U=0=4a`sMRF*h^=QTvIq3|QiNh9G-L zc;!Ae`+bYAkS@sRX(>-5kJgz2%h5+ZT}}l(q_v0q?n$Ikc9+B*Oc+zwn|OK2dpY=B z_WNhmH#M2>4KmD~zH)MuGBFjhDfL(Q7!T~u3Z;fw#_godxCG)q#XK{A7t323ol zEK|4XA#8lP4(*Mj5lorMz^)O}hI93mvK^R0H_bY6Oc_3}d?2?>_ 
zmOD9%?O*mfOG9C{4OI{aLZ9INP&SPF@oP+)kjdy|r2!IDKQ+7>_7uqU#YHk?DApsB_Tr-0Sm|57|gM*K$GphsLQHLcMpy*BBA zLZVIba}Y*8N$-pl_g0l5hMAG`-x5jidj2h?)fZDkjj=G0`x?1K9XiVTkiO*S3HhQz zzPHTN0-80B?-Ac*_q@KQ4v*pAD&L(gc zit&7w=Q00ki27`;7wYmnm*9oh^V|?+MyNJeF*)y(_@wA&$^)EHJ5@y2z3OqJ#>K@` zwct&Z4+5)9*GhaUCw$&CZM2QZylCN#v|rnc?p?wg5VbCjOw|&2ORyo`Dm@IfcBSRM z1$7=YlBFnhnMfb5bjDmjW#eY{R#PWwe|zV?E%ER_rqj>%^LJAtQHEg^@9j(I9FsQ9 zi-~}TUhTwpEN|DxbNJYJYE{YFu_?0ZaI}Iya0{g)89LLmeK zKb31Qu7j$L9{CxVu8Bn~f9P!F!y8C~bUikVJzZByHp(#V%SAryp5fdXJY~jZy!rMN zxLk<%-iKfBi0#Ioz7#BB`-LiX;P;NQAirdEXC;i+Pgg5z`BEA`OjG>LBi_N+R@@uo z-0b0oXIl5TA5v^GlPXgsHOU(+NPM*ik4NLZTgYiF$9!M8lHz6J=jMHGK{S4b;7uIuRc!abpKyjYsT{ruN#yo+L8>i+pi*ZmQ985h-)-tt8CF zU{zDPda1CMLWbjJ=uY$Nn)-1!w?n8Bhh8l1TOkT+&G5}cwgCX}WfD%v>G9^*qge7n zP;A4qOc#FU=ZA^|RZZUeb#K|a>$ciHsi&>Y$BJ7QT z0j6L3k+|SK%Y~zv8vAGrg-1BN)avv!&mJ*AAxe)JfYl4!bDeC4K4~S!#X( zm?dkm@olp360?%R04bVbW5aj*@})R9?xSSQVYwFnVii=8QiK(&y}3H4+Uhi#fv;eQ z@=Ni{#S%9NJij$JkQ1VY60A7nppG86p1`YX$Vs55^r-XG>BY%$WKKJ*uMqk0Zu@}~ z$om3u2p+$YsD3oO<&-*}yY4dH%)4HR(}jt`2bBMY`)OCJonXHpTcZqhjRUHpzWDZ> zl;{~R%}?To`izvd9f)IX?xwAojz4K8VZ$B}_2CwE#c{NosY@EVib{f8beo7m4;g*d zLS1t0xKP+Xwz@SudB|k7vl$C`3J0CnUTr)%iX;OO+!RUs@#*Q(&Nk2zx|#ZUGveq* zl>(PUQWQX9L`!wB)b&x(`8LWGT?~y`v{)Q^`F+VWMNI&hn`NTsE+Es51 z8}sT$qT<{5_icY{Re|n*8`hmavu6+eLXkA!15>4h5mV{y=c7gky-=nhp8~X-)V25x zX?G(nUafVPRj-8HXeCCG)WqEAstXN+-%mZmv-yrz5n9O+l!l?mgvUr`^ZvxJ*Llc~ zUY6h7-rf0iGIQeJZ}!7lvy16oOfOJ+rEyA4#8UJXJaIq-tNPVzfN&QU8km6NtKXaD z4cy@k^6$(eA+VtD^|9OqOfQo5L(@kHkGa;xxi*kHm`riM%IM~I$!x_p>MZ=qKS{3^ zJPwO%LqmzOsSvreR)okQ4HO~p ziE3aw%6T++Q+B)J6CKf%*;!`KOD-NlY>?lOi{*tEwvF)|3oUhO8yINgAUQ$-;Qam9 zz3U3cxoV%YnRW5z8hUf}Xws1MJMbX&w?ITn7dM+LM*~m{61m+uv1&W8+ z0Ppl0*fn01-g&%lj0xv+O1A{|zMG{s(ZdYx?+~8!X7SIzdsf38nd5CfncDEARg zPGEKDr;CD5QL0aoR}cxS7fKwOPtHgU8q zU&WL49O#c`1qI{#KKJt3p$IQ_YRCEgCR2LDw8~gG7E%|T)0uoZQ!{S{qTk@!Fsd--}NHW1c4=`dSJ1Yo$T_#D9#9~P7ZE1cYyCG*D6asQ}lRB+$VdLJU ze*)AeM&rd(g-H{iR;`V+an2vxGm8)@X{+7iAYM(t=sa_Nv|9Kx;)r6LE5tDDj|mfj z-HpcH&^dF$ye^g>jWR7r$hF$=>9gtcx_<_HMg4e<=SOW`MQ=8c<6*=F zV-B~cdd%`AiSvHw4VjC#E{_4v->9S#n<9NOHju>&DnPtX9r7#&jgZw&8MXngNoO83 z&_#Uuz~+(uvywe&$RnYU2e&eh-Ka>C#!&s-J-M<9858>Ik1?CuOT`TY$>q8hJjS?J z5Tg9}PoEjfUe_fbYb&IVo)_v9941#>>Su4mK#!>zBC8Z4s$Q3sRx(sL_Lsc&-gOOO zdigRAHWMWH(t^&~%fg0feb&J5w93lQ5@<71#rc*Rw3zbj{!ckC)Spf$F?(`Uw)~_E z`>n-;rGwXWOC@>>!XOQNohMh4ozFblOMS1}{3mo^OM_SW`&r^=E|>YMc$trO{7js7 zyBPeoUQ2=OgvJ%>))jzYwo&00oz0nYs+F{3F_oaH{$h`~hoTZT_ai?-VyPGE z?gUMkJ{HF|kB-)C>@@M?%nqvr-G3_)qT$5RnfeGp+&S1=Ms3?5bPwe~@+XDA`vf9p zzfq55p7I__0}0|H>a<)suEz1Jd++gpRtD%fekk&)XUk)DVCA}b*!u?eurER#=$g#N zbTgbqfME9N^FffpTHG{Wm)R{I`1FD;1o7S)$gmcs;Udr+*)k{DCIysz^vYbzv;Pbe z=_tXy$JZ&7;~xUm%-Em{am(Ab+wy6cCLHdiwBTPR$HPW~F2Z>jz5CF5Q+=o3As(PC z2e{Pi=NSe#ExpSOouflRp;9QIsO_e^wCi_G6)zb+oeSovUzH*6mA!fSnmi2L@?v5m z(bIeFSGdTW&ErEr7Sq&cIrP$Aohj2|4&znBEG`!qLbd%c)t5Hd^^CuLa!bXNnQcS- zGyf-YUA9Rd0zE7gGf{TuG%YgpWSlsjWQQo6s>ytV7i6JIs#Z&9Bo@;2%z4vv85^4G z?wz4#JxXApznIa=(N#}|R)tgh<(n9XdOcM#U`owkqF9_knhdDX^+HvxybhNpyeeO) zF;|2YR)iy0XoD(6W8dLbB;G?*W!nZ$o>J=S5C+)@W<4@5(}4dv5_THbz!6Y>CqOiY zu>iJqN(T!hiFI3&u%M%GQzWwu?_<)w$zx)kf)jsIhLM##%GaZa7QyhQCO)p*z>9oz z$X1&JG%Fm4s@}N70n2Roy&pbfXZbyOcH@KZ8Hj{Qh1^YM{kYwTW&Vsv@*t6Hv3VTT zHz?%k15NxMS8}fqTAYbWG@=M18#@Dk`#2q=tMvJhNvMzviK4~2f1RryA2j}bRhGSB z)Z<>ox+L}V_)llcTWo(u$KbTUyQ;!Mu#ED^GFsEnT)iLqa-@07TZx%4Eg~2)>M_}g zjs`64*wZrocI!>@u)}_ukZ->Q<{q8?xZ6HMUb8Q5UUFovl`uxpmJJ>f)eJqt&?lGA zyOM>(^M~ZEh~-CS8_%tVL5CD^1bdh$6!X3=^{d)yKeoaYCL{aiP*2l^V-Hs}F(!0P zxMzd+&bNEG`0voJRjX|Yn`3+gh5|yqtxUE#SXihKJ7X;+k&U@X-3P|>#qk(*PEhL0 zT90$UekNT=M&MP6iyvT!>nuk^7a!wCGwN*5DI}z}(P*~AyM(cdVeLP^8r5{{8`1~< 
zRM7Ir7wp04q!F*8Wq)in_}2I1(8y0(DjI#w5{@Y0{u1c%u%XpE?t3Vl2&)~j&&dOO zjK=0WG;T3#d{i{p7GC17(4WkgrwL!cdj>3K*B)GM>pIUgI&zIany0**8XfrV`8Uis zX03sR6*pE7Uqjsi)8dN7`Mr*jK*ioxt0Ow;d7ORwv ze0k0^QqtYaB&K$(B-DEO28sL6tSHdO=MS^Yn|=qdMrAkJi5)ay#H}gf>IsUEMvRE5 zf2cZ-@?2K!W`08VgHRV32pS8RM26EonnR`FgiUbAR@GDAhoO~P_E`W@Pf)F@$e;5? zs=(o6;{YT|Dsb%*%D{|AD*29z?ylcKwG$KChO}`Dzpwo{Xwd7N`{H-XEr~XexDi^i z^SX$F33qpeW*3^sNQ8a7h*Bg65JFp6oEOhVUz;nERABtLB|qeWIgY$0S`TV0Ewpl2 zzt2ahe;oE|Kn9o_K698X!6#k~<`F{#SFGjXV{PUN9UT#uFjOL1U>Kjm_Wh1Fd)p0n z78>gOuHOrY;ebzS9wwmz*O8g`N*8#hG2|WeVV<;6JMKjV_qgJF!8LJ1vI-5oEwx&t zhD8hJR;;xAao%fc%N{Sr5kLqt$IcCk@{=bosE+`Z^CD_1c|1u=EK7~1w3LIDI;Tl&O?IAOz8k3u*%CYnu) z{f~InLpC{0Ha7U0=K+U-`wAUpuO<$h03RD)12~lSmJd#hec!Tw-osCXa^Kd{F2A;P zeG%q}Oruk_W=aloLs7Zw7&#YqoLGPZF41)gE(R8%5T7%MXR2{H_l4{Ii{0VT>1UFz zSa>eLC5Zpj0${+qxfPlc=pz!CJ;Vk+vxdqywF-ViNQNk{TpmZbXMjHbh*un)cR)RQ z$vS)6Iek6Qz!hQ^4N;Qwoo)5VB*h<6D4L&7{uGV=`yk1En1#V2U`n?fS{&=PjD^k% zJf?wyIGq>_n%GVWle^>`QQe>aZCkkv%m_+h_-Zh)+(ibj-+nob#jalUJZN}Cj6oMq zugIQF4SJ`b%`46;z^3+Fn+rE+@IiFwTd_k${N`O&YVj(B_jGq^vJA2>PV|qRl(xO;gEr;pWY&1Lb zHBK9?bCUNbfYE3O^h#S-#S0#Je|WHa)iX=r_v>?RTGg|!n)veZ1EwCaGRUu9X{=Ed zDLr2NJw$Ul9cLy6krd3e`7H!MS}K8o&CoUBP3gt!^w43K!sSJnn#e}lV-Y?G8#GE- znXQN%Wen&dX+~1*x_hcFajFxd(HeNs^=l>`qriHd@oc`D%Cq8bPP7Xo5)G@4`5QwF zT|tJhRdj*8Ut3qQQ+5^F-no_=b{>uNi%8GXjTTw1|G^&kX=t$K*47pc`X1ptT_n45 zEW45@jMb$NdiW!L0HnlX(*m-}jebGuT=DydqApTcu^_GbbCLfbJH|iGpdR&X>5>>+ zsGI37jSakK2iFI;o9roxoun?h5sDEmbst(BP5k6zqLMK?uBfx-5n~2Qkwe9V2d>Dk zsQceB>;vSH8t}t=mPgj|3kB~>Q;W@*Eeu3f?|`)9pIZ!pTQeHQVht~%>YA6BfHmx| z-g?#kG`fj`YDJjyh^S;$xTdFKi!CaXlE>z9b|r^dDf_8bqBrKycTy! z<)qQ#w^|R&v;2oS|1GP?wX{2Ix_o9tYs z1QR-vuMmhQjmFZ*4vJZncdUTL7`R)v-2UuOneudg{0<8+8&|8J{@7sK5sAwzw_pbE zhWVV}-@vN_&Di@@s-J!YLIBp(Eh z_P(`A6C@q$zL}IrjHI#?`1{83vfeQeg%w@@c`U+RMPlR_q~eJZoM}<8faIDyQf#x&z78wgJ(NMLhoG^? z=0I*}ixfz^AcquMdQsS2`RBQGMNseMfMw6mDgiwP9X>hIW3b9syQh;_P(@!PnDIe{ zb=q1C)GJqyk+t}reIxSYwCowEak@`8HaGwL8uXQh;x(`iL#7Z&}X{l zAcshdejoeu!hj28Ade4s)fl>xWm?Qe3!JL$kvG?B!6EOWOA#Qpu;C-fihlb&Vr=Gx z5&Nk`2o7WNYjuPi4k!m&JpA}$4+5qYcoS0E=C-VMC3Ck7%bspj9j^K&+>;p&)DGPH zC@4_^Ilzae=P*CPQDQKD-`a>%>-VEJCq4F=F z5b*N1O)U>pvC`!C`i|9-%%}A31Cyfh%z*-v ztp(H^A)|)&iI_c%cm2I_vQA9mD^{E6ur!FM=m##{Kv{4a{oe<=gx#mXKyeYfi*R}H zY_43gnC-_kMvjNBdgx;lg&(n!Q^F$n=PsHNa3TSw1 z*9NLDkmH`zufn3EiF_-DOzFKwX8&C^6qLZg7(b8!fJ)P@--;+~(#K@ulMBz#?A%Qs z0G83f7n?whDLc_c_i5$YRrhh+xWRvbwgdfs9+XebA7RizOb0a~cNoDiQ~b!0Yq&8H zowx?%(9p0^hg0JJf*#P6S)~4Gcl04nsTWGHRm|L>^sG;lFMjRKM^z{ErG#3MI5gn> zaA`4RI!P+-aNU23xl8p#ZmzSwx(Sc!P&oE2clq6m?LMt-lP5wDyKxdz>>jEv`m32! 
z#S0`tidF3MwDWZJ8>m2MEIDbF4$P(!n$rLHHk1PlTTy8Wln&k=B-gs=6znqr0`{dTGx94yOO0D^#9Q(Y@jcD6WMAh^+t`*1>*MVf^aT zYKsMpGM|OE01)?OSs~qb`$faOCRj`UGpm51EH;nD{>9F&f+psV6!q_Vqs{oEpC`~) zh6BfaXf^u}`o=w;;3NakIS-PP=}bSTk6iG)GZHe-NWw5)UO00e-n(6ZLXq+6T>}o= z-)&mg^gniZXNuYdFa84JH|#B0&Fe@BUkwOYpL&HkGN%v8CEAin zr?DxbCg&FHhd{Pu zVhH>$5lg11MRMrBe!PJbSnTStTzA~sDl=6lKkV(&ADWt*fG21(FHq>Ue9C6=>tC#W zuHDo_huNZIW;(dSY8!q_2Bkp-H)h?>CV8U|4By@ZM5VMmzUQ-lIB zh6vLy!>ninK4>Kb09oBY*RRP*CB>}&8o4zWu!{f~nHSK+N6u18<-Xd}>Rb_KMRFxX z=F+eeEb-{z+gZi7e}1(W^_fL-Qvry8&JTR$iRpx_f-hBE2)W_YYb2)i>tD^NDr*yq z5XQ_Fc}W;NKGvEz`Go*ex(k#4d~6|WTu04vEf5*EpWzvqC;Ad3Jy~`$H(Lr0KA3%s~Koct465M8WvHWcA|JR5J+{iWDd&AyuZuM#t%DNNnb~tc~ zC;ST>2387oJB*SwN#(jYN`ns+I)0dATNr*-v_8I>f7N`(*;HX!1TpMa(bc^RnDqgG z8bIg?CUsk)|96r;gJa(T8rWA7DP!h8Ow6Azh$mtdXe{+OOx}70GlEoBv-Q0%mYtFU z&O=vx9S!jVjBPse9WV(=&Rt+EHkv^MG($ez79AK!F+c+tLXjDt71C8#^*_!Nhy^M3 zgny35Ru6AxD=3d&zGXimS{)WvddrQkgAw;C@~qcDM4ZF zKgLdp_jej7#I5hcyA*)ic%qm=UDUWqm~kqqQz+(ufZ40sq0iiam-)PMuKa!%-5Un> zM^n{|P z25eFLwvk+QCBgd?`r}$ny3DI<8gm`l($<^vZV`zs+^>`nNi-=mYPf*f@h)p)yhN;C z@^_NW&6j5P-Snfpgu3sac^mrvbnExdOg~kz?;lQFgJ|X^NqA~#Dpd@ddZeE!g~}^= zW;{<(UKu(}{uhfXq~?Ui_jZNys$pLiFOvQ^4rqGGsAIrbjesk1LNbz9==DsBOM&ej zW(6n8ylK704tUsR_va0RKlLKHtUW(^sL@s=wWt1Xw!|{Z6JeMTZ9*>@u}U2sY6k4~ zd14}jg8vqn!+*v8Hn#up7!A$X5eMgE8^&>iEH!> zkLvu1bhQe@6Tc2xEZ`-@C0eRoT;Q2JWr~QfT1W(J2?+D&N3|wt0~|9GaIk5pia~cz z3O?JZ$6ccybN|T(kd647O8zvkZ1<1+5SfM~yTzLh*-RoZpfpzU1l9Ac; zkJ%U~3wbF|g;$LaxYwdbJ>Cr0>+Mp1WVb9rSn77NpaS;GG9r+k$pM(v$Y|+D#650` z;K%g@J1a&H;ThD@6VqGxp znts6)t@)k>TZG`%%R+nv@&65eHL$t{pET0`F`z$I0z{7AM0a6>_+(lA{+hWEzn$QG zXe4!bHgoK110BA!SWqQorB-+Vu|jt0UE6~?@Ya)=ZX^186rFHcz2APepWH_AsB6OC zu(x!PG8ez3_+MdKLm4@GO1u65({(w}2y5!}oW{yM^D$wsCQDGfHvJRx2=Dp-A2ieF zgvyYRScHtiDm{TleB)K2H3;AtZk^r1?%>THNE*HJDB@p`=|ly%7FMG37jz~ip|);f z9?wTceA&$?G>Lnq0XCBE9Yq)+4|>u*hEEl$dI)v_#XWFp9H>yi z14xhZ%0~YG0=8wFEddTJD?p@HVtNnAQl!K(>%~+|n^1MEl%x%S$j%FXn7hHt5WaYi zU;x`%J9Sgf-3l`E%oJei5_-(CJxEVzM|HDDTjcfiI4 zUP4GYgTEvY>7v#8-SfcFAhTP#FFU-~6+grk5Ml?OMpwEw12JQ8_2Io%CpKDgnvgI1 zi8@49K(1>yA5(kHw>kePLb}zQiFS}h{%#c*gTyodb?NToMJnr^`xhYn2+sN*@wpDW z1JbV^(5n|WH0`iBR32bbO-=w;3N*UX4+2!nsAmlaee`yFwfBC+ccAyIXuvy2gWB+! 
z`V*13Og>3RYT=0w04XRqQ@tc_5ZxPSi;04weQ|iFC<7AXKQzfE*1pr0ID%gBmQU1i>c1sx(su1`~>&gP^7ju+fE1+Z_z z+Arf4I+=XcO^(g(rb3@ER_2a!8T7sa0CCqx8V~%=#qK71yGD;y98z^m-(-yI980AQ z1=K4sKsH*<L|#J0F=X%e;diNS;FR;Nam$416}TrjMUkmD`IBEQp3G z1(@owwF|!-KC1L+u{9eg15%Axol{qYO)%HLdTiJ7inZMChAc{3!TGW!RS)JFuxdQ1k3)_ zE02gq$fu6A^dHbhcDVt`O3_`b!v?=;I>*C}*o~9|aVAN{1nTlMVCKLdS))E$Q1T9p zYQI>>W>(*3hKrXEnhd|j8j53m29&iu9wxRB2Yq00Cz@LV#gQ3&wBsmg^s+V*+2~7{ ziAXq#-TKGCLdcjDwHjUW%8f>LN@>R4vx+r`O2qf8DCvB1<|0pm4%J0Y4xa1{2-zMQA482K zZnp5875n`p)fO`|$m)+JLz?b#1j+N@KRlM%d1T>J=Fp|-_o0Kne`Q`}_%3)!%*5Fu z%Liu{yFWf643QS-F}=ngGZZAXJ1l|D_?Kmz7up7zMMq^64_hYJNjc}lbMiw}T2at@ zI$eBjvk$(6@x0E87Jw73j}BDVImo*AC;V(nh+!pQO*0qT%zukOh2Mv-udiLb%D_kH zne@Y4*kV7u`X8cAfFE>%Czxoc#K31D-d@G82T?4#UsTS-g zBGV~@pO|4=!q1hV09pIMM^~GxL22`k7(7sVkr&e+11S#ocqX@POr*@Ntopz)dMIYX z{`)+-gQXKxHhTuI4o)fMyK*8Fn?@ooChkEsJQ4m((#^`>$yWs@L+1`ih6|={DqUf6py>IsY7%ICa{O3Nz*UC4RWJ zdPx~{ei*mQHg|8g6Q&Xyr`~cow%Xc?YLR2H2d&-KU8-Kqkigyv{0&oQ=Xsj!e@Yd2 z`^>M-Z0Z?}s^6SZ0hZC7ATc>hJ)zgPQGy7W&fT#@FFcyS}T z23dy<>})%8Gu%BT6l_2M%d= zA~avX+eC&ateaQZ%jsvOg#k%`9MnblC3_Yn802G;^pEql3z;dIN3(ni2Aa>xI=tTe zHlLoUI7Gq&zq-f_+(2KF(vS@F{<(ATZDu1w;wtX8S?YYc_lyV|JQcdPPjz#9v+tZv zBC02eeC~W3v3PU$rHQ5Uql$oC=D|g>GHQK*QyuG1H8evuQMHm{W7kYrw(CP|A7{<%naxIgNqiddjw4~?M|JITYnrBy@g1h(sCMN!r7O)Xh zHWt#DvxECcEgVf$Y6g#ujmm0endQSxUPrRoix5sSCy`m;Aj)XU`TTd%7loFDc|ERq z;uucsVkA5ck`^;=N&>D+Sezz~C_FnsW2Av*90SHsH))}_XRl(KJ&lA^6dHFLz z_j#7ht6yEUz2c`kzFT~m%)^Gf7QdP-D5FN2E}D*5I@|WaBxC?=TBLC1dO6yie({D5 zsat|w>OJ!z4gFYeOQO-zgqST-;8o3a{7hm8xlMPp4aE#*Ugjiq!cleFB+{yDWaPUA z94h10dvFn>-Rkl4$<&Bxz*MoH*Y3|96&x_6*kb=wMfC-_h)`WnkcpG~uaoo}shizf zr0;@J-D1FP#zML6udDOQoK{fQzW=4)Ll|%gz(7woZz62)6CQiD%5Bu`$r9$TbWXN$ z2p9YKlJ|9iknT~R$q-3@?xZ21vv5$}Y5Ud4Eu!^DGKUw_i*!&{#9_o{Q4}Z(um*CM z(-`L)Ii6gz=!?sJDI;O5X3yQ}CJPMkLmL0}Uo&ssjDfP~oT;u?gKi7jZ?S{qYvGQ@ z-G@nfi@)n{kJRTQO7;zFeFhdvrbU;1vX9DHk}y1lNRtH0Ij5u`;$l@T(~ z^J>_?t91g(h~&&SfMS^R{V(GQspOc?j}BThr|T~6g?uaeLFY^<<#IUsGa}J>K4{;< zMF_}|B)semljAs@H0S3@i+eH~9D%q~BLU2txDHgj3LUe5g@YE&6?M?Z05q!5 za?;edEUP^=w4j9RTSV9H(ta`uiJqK1iE&TAy~w;7!fv}wJTmA}=$86j5p)~DbeiIK z$(vZ9_T*M7=&0<+_W5}`H>Mx_KeYg-WTwBjrrL}t=aXULfCD{&i>v)Q-JcB@k-^^Y zBwjKb>}GLYiWTrY>e4=ZFGPCmoxgt;c)LA0&0xX3&$E7bDt5aobxr4`J0tx8=6`)s zf3sq=)ViM9TRlSgn=9~ayV=W`P4ZH8Qm0x*wi4&EGd-hpoE~t zfeVBcS9f*kV6957e(h0=#kMTjtD8Opz93-P107^7_@nIY@ppI8hIk|o7)vbq>Id!} zXBq^ZiH3aG_mO01|Md&6qWx$La4y!oY`mGIcE8har^z>Nr|~W&uQ!>ZLdpEjBpWQI zB!Z52_h~|I*5{U%>Tly!KMB%?@WLjx?Q#fM57)Py)zwV_H-sR{oVn~nP4!OU9Fn6= zYv;XPIj)%3kC?>GGTs(ftzLP8eRYllwmsR>xujaU?C3r-Dk-Z=9o-MS^aJLaE+p{a z8a4AguUE*qZB8PO)UG&B4kU57))puJ+l<`n^Q6j$FsY-Cc%dbcgB|mbZ^!5+S8$R%kA{))frNHe4!03)^ePUUd({mg&*k9^IGzkCV=FD`zfScSFdT5VNv z-%i=D2NKuUwngyRK|Tpzlb*zRTpDepw^|*#k9jV04E(JD(iRjy@?8VbXt`0;w3_?n z?AA3Tmh!--Rjk9XTi40MCcMgKUoQ>E(#wGqS>oE}#3Ww3uFdP$amkeBp*yWFw_gec z%^w{KEwtODNIR{sqHV0+4%N3imt7y}wb{nJKBna>(s2j~Yg$hYo75}k5Bz-^|8j8E zSBW_bM3bj8>)XwS$AU>J#*fOAC%VPf$#MvuSha|%RS6a(CJhNIE{ZG>zY$iv zQ!dU!eX-#7EbcNY$<{U_(bOS=PP5-yi`g9bap1%j_8j1!{p!0!wEPHbeY%jEaeo%z#B^&L}Ne&b`HuHC%FYJ1^Q>gMOCiW__ykDrYx?WacVkvItuD-64MOQ5!n z%YPFFH0Yb$eyyaC;K3{BPz#^o+i)&?Zfm6xYuWbm$y4yc0-MxXXhDw*(sr?Og* z)lo|OISeLsxeeP?2HYTa+fnB2WCiXDnMJPlC8eK7cYbXaCXK?& z;9HQdr6nef$FGG9z%Eg2jO}vxE?V0!e~gdUXr1>LNSc&Pq;ahHD6v$X#fJemW7kS% zDi&$P*6?sq0(Vz!Z`B8GOhppHDf;eo(sIiQj&$6I6bC7oDE0OK5mR$S(Ej^U{#3HX?$}qU z(9cuI(%ocjJC7$WOscB)tR*VNI!A$j3uBijjw`_W`uFR-jf8|+I~kI_wn^i<#e+s0 zq8K0QkZ1@ftNnTpTRoFh4s?AUHWF~vALq2_CS)@o@StoVwRhF;Ok$j*7eOrKd-$#U zjLOHM+kJ#m4n!e(zA_cKJpiiB=_IpT1+L_&-`i-V3OIn7Ueu78`uzNH`6%DHHQ-lA zT!!=b_KB~3bZ07Qj#FE?;9~1NivfKxsH?qg*7kR6rl`mowT)!uw6#!kSm`T7&YR+V 
zH#3p@DOf01?C^KPqwFO%W!ABK`}2&H6-?XTko0Ee9L!RM|`z_|Z?nle9Un zS#|JxhpLC83yNOl77efYmjzY@eQRy=J8Oj?7zPa79QL#us*I#RVN_zb&*@nN-*SZA+ky+DZ~PUvuV! zoD&*(<*XopJaIhIy5oP9E><6Kg?H&-_mO~~^i%z-*)wlG!}A^@6IC+SMA)QTRy^y( zV2xXB_eT5qU~Ib4m~U%F{)-o`^j)$RlnZRtauyK@z>RQQ#w1oTRd$Rzvrd5xJaOq~ zPRiRShGn`VmJ*5WSD_;wUKOt*MSjV(W(bbTG4iD&sV>hgZr3uezmJWGle{-?WihdS zVo+hzd^X?gI`+^@cjR!fCX2TSc$u0*uchmFcO2k_4uBo0Su(M8Qc7br`Qd~=z;gdO z=s^A!DHA82nx|y#Gpm3taf=w5N*d1Pfg28hfqF0&80D!>*~C|xkn&VTo#g(Vt!D^R zVNSJ#-t01QN0ze|R<^s8Z17y>JZ(u?e4}nYO`c!h(4evJE>Z|+YIh;NMze9xLNcF{ z@qvX!DO4$crczhlcU!^;2b_w-$c#5$05yxod7Su_g8Q47sF6aoHc3$~ba=r_{JbMz z&o~pJRJ88{qAA;jH!CY~w;4V<^|wi3&hc0b>`p4tr4A{KBgshvDjsPb(9}3B%z{;N z7C$uF`;w$Wwy@UYanjr=xs@U|5|3ubeP{bdRg{v7*;ThW_wLcucwWEAP%n%_g+C`g zqQ;5lk>lx^ri1@v9~$Lv&1`JW8o zoe)~tf&9NY!oUH3J&Hwt3ZS(aTvzDW~@MFrH`9Fz;7vSFeTDo%ee%rm2E`8_su31Y1=aVS1uUIOM1RM9lC&tCp~m$Ok(7J{MXuf*)h zlB#Q@{hR6{V}y8Bxqp z`S~_erOlcNT!cFvUknZ}uhi=*3jS|1Pc@amG{P3CxIih3H;88EDVPVY&mnnZy``jR z1r->iPJQ5wN51P8fZhZ-_oy!sg>cg@@apSP(4nXD45|g}IDp2`jCXV?omrXH& zIRZTF4HP-Kr#rk5Dp5NJ1noa%xKW>B{xJHXR>5K_%8SkfI?FO0-Vw_>yRrB2@s%)` zaz)$KI-dGoL5Kz%Xw?5wiN#71KFy&w2sv?b$VN^&ekzrvJY}Xyg*eng;9A8w)s5?W z;~ks)steP({D_~Y%x3{-OP`F`Kv__i;ZGKna*p#g<{p->tk*{!Dku!HL#jJiXov^I zrzVq^TzVptCJjeqppX@xS;M!j+Qm=Gc;d8BW$b{i%%Jg_Wag8Y>~NYjy4Vk&km2+G z(UT{T3XVRN{J~xkr*Se_te%f2UNl1QJ5!C~R{ba$DSM!!lZDZPaSKW>`c?9axE}Oo zSVg-GqvhZgR5kB8!4+S=H#iGe7AhBQh;=>}#rfe6R4K`;e)WKz_&7=LrGT@ikvyjmmKg2gwY~Q9E{FY#b%@en zMCB~!;%cG>>3i|W=hU>B-i>t`8;UGKjtjrW3#43sE)Ps;cQwTEO!}DA7)A=^zCEN4 zUsZ7VK}R2RWZ%Y0+6r`1o_qw0we%|-x&wbdnE6#O(zw?cA zG(KBX597|!MAME>o#(s)YJ*0NA2R{MTUm-y0P4;*&3kYxS?m6p0gd^PJaW1}GvN2^+g3WkQukxd_ zHW>m=_X-W!2+1|%9rc8q`P=Xy<-YV=5`E%m|s{w7qty`)hYH9+0 z8btmMthukVe2}t=!DmyOQ0YuZ)CS@%AdSRf33J{R9Ku8Jn`S~nmaaqPx{?k9LN8w{ zj>yo9-wAPeE-fynTp@3zBkeXA?(|=Tisd;zgmqtRpA>t%)9NyxjkzS^TMRjO9W#jB ze>t6d_J^%T~HsZ25TLb(Gdyx#QuT`3sh{AFsr_XF#Wf; zfVm9u>a@=g-wy+-D=`dT9LocD7HYlc&pZTAa~3ZJRjM*y4~S_LXsH=eAqbAb=d`LC zjmn?$@Tq)qwYtM;KYG0!b=G4L`R5|6A<4f2d4S|A8nWut9;ZG>#lIo(qax;5Rl?GK zXvZWynEnp)I!xs|$PK_a|J|ILTJH{%rvShuRuxyk9u>i+O$9JlbN}O!oF8@Y!!o5b zDFtx?WNOUnezYwY>UbFwz3^N#_Y;7bU;am$C;hJmZ|?KrNk-MXyvU6-5q*E)RR^}7 zkvl*7@=m@DvR-{63S<2~03P@b*Vb)q;Jsddx3`~e*kVX(?1j0XMT|;~uIIm9z;BNP zraPc;r9c6_U=c@_->G4apu(2Uo}eD1J#+*#NCVP04o63);a=1#LnSj3TJJx6SYF*d znztX~8cj5ULE{wjVP90GJ+1L!{Px0k0VZ14S49DD(Q+%*XhX{xw%%l7ZshLJfWL)Q z#Pau{UZb7;pO#LwwEkGje`3q^)`^$!`D30{ZiWIRq~xMkd$OfmmQn3R{D1inr_XCI zZykQE-ohXnq}FY_b~%!RmG;B+7#wu9(%%&d&0L=>kg&D-JPog_3F;k2v<3##dJ{AR|EJe$W>rgqwspCcU6hcJ14r%JnWtcDXBfQk_ao4 z@?PB+9)8ofk+E6Mb$gkyQ+q*iKfFq*>AJ_o{>cr{T*m+CJ7ISxSy!hp+pmbP6^HaT zB^^e~DXBRn%bxq+Cj@t8GmQbM#P8TunWVIlQlk}R69oiSFYCSnrLlT!&|^dMNMNIZ3WA!0NjJ^avP`n%*+wTvb)qzyuh+asE^e< zyfh`>Xlu226Sv9imf(+I;3eL-*h5NW*fRILV;fSh5a4Td`QsVHEAc?F1ok#PA&nLd zAF+^=*l-H?(zijbR+&Ba_Fv(+VB#0!KMK88L8C%sWTYGpHLUl2l)^>yM`=a97TT%9 z`uXlqb5~BzxC4O<+hV^H_`4O~c^=exHe=<}8A{CYCyh<9>)Ii?T1yT&Zuo7XTYC%r zxWfO%8;zTQ?a?EG(o#iXv;g&Z=C)ZwXqn~`kXIkcy38pKyy)gHLy13lywaEpicwy^(oJJ3&UX4M9{_t69A(y3O%-M#raxKI*Wkt1e`gt^oDykP|h= z8?-+|%7LiFU}2*(%s=WpR!@N=&=J^EWPLU(=%0hxKs3rIch18OR^N6809fJ`N#1`v zW;a1|&i}J-d<%74n_-m~uD?mq3&jIaBDuVJn)cd*b2p723wttWqclukmCp=fd%KE`Xqsl2V#njH;6w-xGKE%@+Pw zSwh-eT-2c9;yVr7J!F0NON}s*J4OOkMrDb-;QEKw230yxN94eBb~Hl)m5TU;!Vn6= zBJ}kys`-9*&HgK;4dg8j#1i5JdxFh;h|~D>E|KO5W^x8v8F|t7;-5lXp(9{6V^YZ6 z(Q@jIk@VUJRnc5Vum-6m0_o+wm)^AL`+oNizVqZx5z4O@oY?#IZ`1roOhK4sYEuXHa4P`ivXR=5Fc&|ZW7uYp?6zN8Hj z2klU&!hGCwr-{QDfX3|KVs$`J9g(X1ti}u%=Jy!)nOPy;j2C}r_%v@jk416049+FY zHv0Iv4IxisBzB6+v3|7s(Gn`L!x)tR;+1C8?%bw+78;$Q;pQQi8tfD#!TaQ(w?Rkm 
zC_L}!Z|7zRR;A~~Lp4KM{}IcF{8-lgc6DPGg_ZMuRV9BN3K=fW_fNtlpv8qlhZT&F zVa?)X{vIIxCPS+n0IZmw4kXz(DD$J|rKZ<-2cR~)sJ*W>lczis) zyfNysu&!#az&n&VE&D-^RRAR4eA;@K!vzV+TadNFyjTScYL8^2#xgx~OWtE`{~~nR2?ioMV%z?#*4+`8xG9s?cNvZu%pr{KFLG zq`j@D(XQH=0KCh7r!QIp>V{U65A?b`sEPXUeR!WwIW+RwpYROl7Q-jwxsk1a3 zrZngurGCnsgH=@Buezg78j|~3;7E+4Uy~Fv!VfeJpbSIV$M_{ZzB{jCBNeppRH=Bl z!jQCAhD??oP()F2d2Lx8U;f7sunG0rwYgxS-Z9pYb%+7%-ivE5{ngA2OdiXAL`G*( zbhEfTXb$7)0gT*>hthNNk}SXmwqtKaDDV8j<>lk4H@!h$rZ z5G%H?Fe*CE2`NV1?YZIftxsn9M=Iq?$V;hc{vxQrNHg8=gwK05ft(UcZ~G&L z%gL}$xSmi!><3~72WoLy6_?SZW}+#t<}Vf%CfbK>E?&o>h_4=+kpKtcN*kD)$1JBTYoZjin_I#@ zM*zyetVYBB^>NAX_4`J4F5X18{68NT?aDAt2|6=c{43?cvrC{~(-5Wh1n>?EvU>B9 z2KQSv1^$H~0K2p*zH&HmXxOyx(+t>a`|DTP?EyykFM#z-Lc!EksX|)FGbyFGMBw zZm%k<7*-wr14V(FAfIg&%qiC4nIv-(ii*{B@ehmX9tB-+`p zf3n&dwmbVs;kR^DFSskL62D|Xp{Y%kzz_78Tbmme{}>or*TShvT=_#A8@x58w=F4wzJ-&)3Dtp@icB9=5q?@6!(ZqGbdq~s_-3QE5ND=G)NZQSKm6T`Mx z-Tt&7I}Se@I!#qx3CqQAyz>11H9Y+9(Ggq7VeCkHg?pBE*5?Av(5qijhsvx9C;47G zyW3u~jiG*z7| ztJ2|~`~QhTKn@rn9vuRs9Uk4dtZ{_`ZTJ6w4}~kQ9WPO*>oEu38E6hM%un9?K6_;` z=)AA*`(Zl}Fuq-*0AzNVp6Bi{uLhdrOq=#?fcu6EOMDFn-CmHQ*~ zH?Y_-_AUAg1@NeS0hL`IQ>LtfVDlEWy; zD6(YK;>e=T$)omuSxHcNn|4`Si&Rg?XI?Gg8}CV}&Vd$)Ad|_ktlE*+!A!T=Nuxn2 zrm>6kw6+?Z7hOZhSf*o*xsRI3%u(mVdRLV@J3HrW%?S`BgL>u}&n-;~wy@3f(e{Ix zcOGUnu#dhALs=65{6E*V>$`iTv|D1)`pnR!Xu|C^)f2EmmXgz@jmKq>(qUiT&SH@L zIu1}-%Tecp-b}|6om1}2w1iz>sZK7>6c~8Fc^Q-%&Oa=2_%kKhsP3s@(fD%1#7NQ% z1reyxbLW!&diMJ7Qo*ra9;E1c?brQxVNG5O%arqequV+2Hnu<}^%zHFNw_d8g%g8p zBli=-cI>*I{kRJqBk5?1+@C7$Xq$Pf&kl?Tcg{34nXB{NN#!9sI|F(YHFaI~u|%{@ zSK3I07#(*n{Wxywx}NdyA?n_|RRIzP71QdyQ#vi!HC-e*KW3>k^T@lLb9itlbSab^ zc{zS#r)!lB5zj`7G*4FLARC~p!#zHHY>TnVt=~SSuTSERFF2@tD=*}?$IjSA#iUyI{otJqga;zas!U_{myY{H^W z`tt7lnRZ}3q+g6_pC)iXjui(rvdV`49vfr>b%V!372caoFrFEFJA|l1nxIS|#LxR= z8)vl19Ub!cp?4Vf;dnLD5n1B!v&hm1|Vu=RLr#ml}tX3u^FI#HXo>#zE0*oNW0*GYH& z92?}wy@Zd59XXFP)wIvYhe*yD5ra77;8TBqr_?dA+!55Y7<3$J2}WX;Oz8Z#Hntbf zHjHiNbFTZQqnJQqDr9s+^Ur=93np+uWkF(CB!-j=+7gSPg5XcH3JT>2wPDIax&DI6 z(hxx(Ic*WnWfoqSq?Ouy1u8Vf^M?$Nzyws(kX$$iV(4%%_xoPG%Y&UoI0$sKEB}qP z)MtEt{_+#eu|bx27R?BF868bPUR+Q>13F923ElZVx?EB)nhk+(Djrw$kv@5qrTXen zt8CV=VHfg0Er55IDYQOh!Ws0vF+>yYsT^OuBLdnx6aZzrebQWg8%9xp`k4a7o*ActgBNvyI*--7ZY0pXCh6@06A0PZrH3zR~k*SVLojXTU> zFmZIz7rLOp;u*_$u}7}hp)x(?t3~C*xvLHm^Gevv{DdAUyV|S+tKF&lcI_Y__y}N& zsK^O;O~HDt#bc@YI*?Iz4ehb`f(F#+HNPKqu^aVgsqa`fPpayAEz4{7ulEn5Ci0zT z7q6m%7zR~ZSv;@7?_5Y2HsysTXes?|q~D-olc-&5EM#i>xM$YKYo^0vm)}cil+ZYM zY~?iUNciV$EeR<1Y$sXy=PWQ+EndKwFNZNo>L)jVMpYK)wuI5Vg;+nnFUjMhC3uGq@?l#Oje$y-e12wUqjz({?N`}b{(Y!qsc9)CsHjBEh-r0p7&mV`o zOer1ZD*7DR99Jf;k{YTA+yM36DKN1L!p}DiXDb4dLzb}Pq)uyt(ZZ0XAx*IzOxoM5?E!OZ^+)7{i9DEc;tw~DF{!r2)$TklG$NsUq zt`hBNa@!hNu-WDlvB3mSmOIgMK^Yuq4SzEe63(abDqj4IuzIljduw+7N^xttre*tR zlFwl3P}%SiwK~`k`_T~vY|bMuK9elY)&(4obd(rSHs*!xc5M0Bbvd6^GR(-bKjeRc z5_Av^sNprpZh9YCbK8=XD)SA*W_{3G)nw$zeMl<#2}wrxy=9=gpTC8!d@SQ`(xmrp zTc|6Qy5F}HUQ4>S64&|kyY?2k-f<++m+!W}vd7cp@&@c1a$_S8K82bIAR$hf;=%}i zcg&#D;n&gJkTLlcWrdBgzd)im*4^Ad`i(&NK=xHzo@$J_ibpH;$_x=x$SmXC=CJ#q z#|TIPZM0TP;JmxQw&_@MY6$yBI2vGm_sKQUpNT95vEUS*>7^iM&E)A`sZrF1ToG+HRv`(>>|?2gM)@ zE&&JOlQAVlFV@G)dXJ%eIjI;5C?-$*ajN-QJ}x=xy^FA?pys*>D|l4@bzsnovp18* z4iqjq<4_@BM#x6{`pyiv3S$OydkJaeNhqF5_b5n|hRM$|mVs-4x~m~?+gX*r*>8|J zFq4MEVx+~WT+De2%>WD%X)ob*T7c1fDd5+83QC~jBsS(CU5*^K6-gJ2cefR`7WdhS zku5PGM=L&9>6Y1UkXB4%-{q_mJ~oqlrAw z(#57JWTxDlo!3`kK34Tc_i(Zwms-PG38LBU(GtA;B>)0;hlAPS$B+4dMWUc8%O=>y z%h5bZr!&W{WRpQeQqAR#&z?-VqW~~rNr%c;UzQI#mOWx#1?$|Lx>s^eav2w!&md7O zft1BV@5R7lAnT~xj~W_}3U<>|p?71L>QzMWilQ#*Y;EL*va=OqZ{ffQpP+J;S#hc4 z1^d7A9v|0~lvX1@*Vaz)S0z@HfR^8d^~vzXazeol 
z#1Da%Ypq+k0TM35-)p@FUvgyR=4u~qjKYEjy%OGvWQ;!sa5#`moW9?*yZe2-%3=0u6X<$?0WO5^?6px>^^Aw)uc~Ild{=|6Q zM<&PpJDRL**Yn4rLCNtUbC|&S*2zJGZw{vb48EQNh}eRBxm_E+SUi|XWaM*NU#_-RV2!SBm<)B(;{Hl0LdTVL=6l2YoUZPYrBf`Ef5)mmbGPIb*vp}hfM=Qs(fuCBrBvTa}=EyLd*_hUCK(I1?vAS zEliJ(Rb!UWuwe@VhSdcvyaK5``m+aVff?osoi|_nOt;( zM!8q-6iL#xk7o@4!=;-v5=|0PmX~3ea?}lw?pXKdiP*bHv74U+wWs^_Uoqenpomy5 zWNARCV{_on+}BT^VvXXBvWpE}T+EA%J_}M~oB4H9=rNnVw)e~5nxq!Fp^Cu;3ZiF? zUDr82msQtUU00K$5M1z(=BroFKI9OQeVw1UF=B|#=DV$rhWBxue*O9w1Dk-7lbf5` zQX!}~j9qjg(0fp;qWhpCZtb*NG=js6N|Fk|pHUT^(!KRx#xKBB#l#$v4o4=SAz5n9 zWi8tzX=6djnOuG@t)kut z=0^=#U@?tEe?8J3lnmZ$x)n`ON@zl`c|N@c3OVgxJsdFS?IW@q0Nu1Fwl5r7QRbz6 zPxgL`vx!^&It@0vXc}$2z4%75^9cdyVc1jUs3zKnvrp+kjKeO$IyzcU2ZDfCWq6kS`cgzEJ!Y3J*tj}N5 zUmQVa7vIXFPh-OZmaXJA1=<>&cyizdgy?S|RfpwEW>p+f!7NHcZ1ok>J@bucuA^sZ z)Bd6tI+Pa`dul|_Yd?+k4PG&PbCE;DGn39)MDC3%lSH>?gnf}eVX2;3rCQ;94s0(; zA7;fcn5RwjSeB5hHhaqXZpODxNwPPWj4x+ItNAvR2qS`8`=Gej+X|DHYj!2>!d$DqXi# zthuScWE+~XA;g4vnKp*;_Sk#!EEhXZ)op3jBeJsmKGc#DW4Vo;bx(q>*X{1)F=ci6 zngM6E$M=U&=#!U8<+7&%60Fw1E?^Z|@a!X*m30%@NEG{PIxE)fCqU zP7*>VMro@m;Y?q?>Q_|sw}`RC92?Z6;ZITAnU89ABpuSdJXMQ6FtWM;H-MOsjjaL| z0L$wlaa@9W+s7sl6FNjw9g-3&F+lGQH8YmHcV; zIohx?rb;%?DvCV9k(bknIs!_2w;XcdQ_@!et_>Db0ea;hMa>D5`w3KBeD{jQ{jg(UTR?h*LHTlSpJ8s*3f$1e)WGYj|vnm z4m??GUC$lrcyOQaimvz*V(fR=)?B6m#tS(%L(n*Kmlr0@?*gP>fs%%HeUeo)n~1-D zW@(oNT4Dr(XQo!6;^bBv1)NQtbuf<$Hc|p?|Fxg-6Qm)V?K;CcasgcOLL{%ecGfn8 zkHQ`VnhKP~F*Ks!`o9vN$pLjd773w)(5WMz;KYcp_Xh3J1M2N*)hPF)`AvzZh{;p{x;7b~y6#TQ`ZU zKYp=J=4@R{h@i$=sHw!t=oL-fQaAA^!uW|{b0{c~5F-y5dyE!s(-CdlDo}?b_OJr) z#RaFQ_|+gw{2vUhcjpWG%fPp&jiI+xoM_paAMFmk{itmL$%wVI?gzgh{e^Zd z9`=c_V3LmTnFhNc4M!NBAdGj*h2GBZ4jB9#GsZXBZsfZkcgo-FC#iPeTk4)Uyod&; z`LP~S1^CEwK<^?xGRnAy7Ejro+)@aLNehD<$5yAPW5zT zUoe>eerAu9-IA1WV6T6o<+mg-x5Fs9;&Rha4M1K9m1;1~47c0KHU*$(b?|0uS59>byMKI~} zJ9zH4nQAa3@Z+iRD2P6|ICJjd;*XbS?^axDT;rx^jyjD8v;zhU_z~xYrv^K#0NJh85_20bP?Hivo|6iuW}OThKM?H7YU?$+ zDN#M-`F9zJhiq+`awSbcd-KEk}U1I*4|1LZEC)B_NZSf zBp)c$jKQ{rboiYPN~#E>`x~ZOg>vo{V*&usguk{rVlg!!5!u=A!wpy%Ho?B z!b|QUoQmq}x#FMp-1;E-z3%zLtpDY!Nm29rVoo3`p2>}acqmlso8hmaamfudQ8#&O z5f}vK13Qc`6j}S?2FSljm@z!?qxQ(b*jJ^upxfH*`rAW(E}ftLps7qP;&gUnPT)>b z@8?Q_h-g3Vfsud~YQNV5KDpNmH91N#f|f3*J??lYzYHbVn6G!(+BV z$bj37q4p{_4YiZ73=83d-}mp&vX`SxBY#V)t$WsNzAb18)ED4Ud&Hi(W;IGl`)OWfb`_msrAnRhrbv#2W)YQ?zGf!@kg%up5T? 
zwnDTTTxyrAF)80u!2k5`W$&jluE|`>!ZD(9XcJxYm%)2bQ_*akY)Qa6iDovgCAz&5 z=i`mLH`I*7Jg{x3`Mk>eBfU1mQilQHPgQ2!p zUzMV_O}G3}kD(Ba;Vo(veomAT)q z2cV2ZAIeu@SQRSM8w`OR!{aeoxJ4d1M(n^QBBWfS%^x6-L-#Vx#?`euvfl!!U%xDd zflX+6@T5NR&JkT%N!3fBYDQ4^f@!**mLYIVD+E2R``{2wbJ>ApiFS#d{DQZEJ~^TV zD0d%P?8HRv)ES#=m15+de)+EO(WtBQs1qqiU2WbsZ~yOgulgsF{9Snr8Mh-JiM>&?2tHg4kOW>%JKw`=lK2 zF|Ol&uAYhx>9cLx6nTA4bjl05KN^Fv!wh@T8bhmJpZ6_Z_$-n{^Cv}KQ+93GwJ@Ee zC1MbUF>#PL+?Kf&`EOnf#{X*i*2XhtI85n=;)u~MQz6q^|HPT)Zu*l9MGt@~{K;N4 zb;z59(+jIWm$s>h)$0ofJfY@~fUKf5!Y56J8=>6{~h&t$Q<={O17?Yu9md6p6z&K5Ma*Z43W|0a*}BXNb}; zLPrWBE%Mydw$Vv%L}VX)ODXnem9Sk+p{iZ_=N}H4i*03B6!{v2a}FNH$*ud60d4RbF?{*+;ze&-*JZt^C`$_~d*S*2C7mv?#mq}5QQ_c*FYVm@T3xxIRB@^XS`Ku{szVI%X zEcnnX`;T} zE=>n)Nbxu=TzoXEGOaizf6#DY)){F)Faq@Ig#?BhR0uiNV*XSL>z=3GABf!#Usvfz z*3z6~qYobX$`KQ;QWcqS1mu2MhMUg2`c^fBh>%%=y>6FzaGf@Foi^=y&1@!L&G>{z zj<3bFF#7B~DFQ^$a$HqL>J`b;M(zbDiVnjiBg7N!rD8@QIwPIP*q}R&+0gaIjzR$^ zztlpv>;Pvs9{p4;Usx(yrl5b~kM6tRw!b%SWT4D0PWqH%+cO>vhQi$0M&G>rV=U|u zvYp(y&v$;r42sVEoThY`rP=N$ex))274kwEwVrnz$Vt{!1ut_xDb@smH3%&IjQ93kr=|-0C$7({bfSU{)lg$F2V#27^@XSNDeu zlZ;6u<5(-wd0dmEPXJqS33h8`r1v z$5nQ|U#yk;m@y1bwhN9810RM-;|$9_h`3WokH+Yn{hh7xAJefeYdlIRf{!$i3+Dvx z)Y&jS_d~XQnz{X+AE`n3V9a97Wp=VPo_ANha8ht*w*0mJ3V+hLt?_jV*gGy0E`okyjbo~HWIY_WQKEhaO5ZgW?oBqhX4ATce$n>^|RhCJilJT z6w0(+DFL-oiHoXKa#~Wa@sD!}`lS@{ZOVqt^1xp~n9VPrKqxV=DvLtHZ5u7}e0!|D z&x8t_wWL6I;hL8#O<4-GpCF#lE^(}SG0ucOJ@7}d{hBbw6Rq3IqKre>fo<4xKUXz( zj~qBu%2+O1LPN)LjT97lvD&q(9C91vZPW2|yHzS0;*bgHmF>yIFj0-P<|$vh`v9v; zo3f1U=!2GLXGnp(tGNORXZY(5+b=v-sFZtL0`xZtx|*op{QeOJ?QzzfpNnX31nJRql>Z*tuakBTF+VR~Co zCH1jk%58cBzDK>YW*zN5b@Y}$NYC&uYxE_3t2eN=%!32!X@b+r(+#l^<}KMN(y8T!@l+#k@LzC z&>mWCFGQ+a4<~#en3w9PC312qm9|i$4r+YHg-HPBY&>3&HG-`_UwQ`MJ#PhmA-&+$ zM9~uZ&^Z3Hg{G*vCWi;0Av>|-&hy=eq=39BmOoy`rE~gN^(;sJ_^PSvf~jk-?(@+? 
z&TLqa()Dkw-8qiF^UwO|J~nCM3KbO9vOW2B5fm*843f8L{j+$beEmdo_q_Ar?ol_lcmSi#|clSOg zcUADq&jA23;v4sA#fvfhs!)V9{&U%j`uEa#CU;Z-l3V+5m{ft~lUDNNi6u55`Q2R)9LDM8ZUV86n) z9=)Wu-I{Ma?0w(+=a;djli@!6D)--o~GP5_prNCsaF0e}$Fyn#^!R5e=hg=}*W$_a zbtFE?=1MRpCkM3D#Rj^Xh`OkmaMrhK{gYu>oyZ~|XqVI|dbx>bUlXAK;4wxq$uy1Hm%Ek0Vkvt_Kni~)XHy8kE%6U324Y`iZeHDTICr4C#=ZmP27#|y0sv* z)gZqsQFPn#e4#0FMK9(C*~UW9HOrFuMuNqjg<8z`8FrxV4d9Nr6pcE15T?vsx>r;a zzBAo()5VQ!DKB1mp|dpr1I=lWz(@!;ppsJK9y zl>?0I7B+r+Oc$3K>xwvBz6$?|u6XvXV^c z4(R!@rmYq%3^$lN_jPQNgE)DdyLy)sOI)(xTDpFE+Ye@2@T{M4Q!kpwl$2R*Hfayl*P70!{jHq)9KeCM^05Y5g=9Kqqd< z>*eHYI9I7y*l9cu9W#ho@!1f`J>U{ca^Q9+>7#O67kiy=T3dJ;TV1q~g9%T`Ui+0Y( zg>b5k{Os<`_xG45>A!1f%+(spT;tPZm{7@7obvpg7~(di`v7 zgXL-$pxRuY8c;e&YO7^q-FT8XMVehhnIo@yJ(Qpyea9{UNrgkR+2Jce5G{B#=fPG| zn0M6H-yqNuezfaseer}7q*WHbY%PtZSa90Q8dYoszT=(?*J*9#CFLkYL9<|0QuhLZ zmyb{Rc&}fZ=Fojo2G88D?LTPKbU8mUO7KsrnsldlNL*rGzFkCbdk!=XDDBgI4Lq7= z`>);&pVvQT27b(vnPhBoNn>SHGh_}Z)Xo_2Gh0|op?Ed0_F~)8j`FDYzsxw#y(oBS zb08r6+$mw&+8A_U1-k7}3$MuNr}0DUgzs9Plo)?nXY^zZ- ztlQx#CIl;s|0h;2#}=ncyKgOBAnzDKZ5lG9ln(k|K{Eu4ItCwkRH3q^ zf-;mt57x>GG?O+}=<4_#{RU=Cp5)YyXYSnua@NDy_Z`xyX#(=pZkVLO<7_Ur=2)`4 zD2Uz!G#e!yQn(4!Xtnd3OZvvn`w#eBZjN|foNfgq_oYz=JPWjksx!M-qKj*zH1v}D z5=Of=U))Zh8}7n)%4;07yBbvtXUT{CBeOH_xES9G(Ysgo6-F`bU$m+5@>nMPtUg)9 zT2i#etU`TkQi|h?_!^%+G*kd(Xa zW4vH!`|qNi_uTS2Xy?Kbjvpb620F_4bhN~Y(ebEob0%>4WN;uWF~FJjg-BHNwp(ow zf{Ia^NKK@e2IOXRq0#=0v%udXuMRoQ%>)ICrmC(2CUH)nhmaNO!%G5nT9CY#?mHTL z70rRs21h`t0WKirDESLqPiJ==&1H+=K{gI<*&*oFv-%I8vi;&ng|0Y^#JNsNM&Zd~ z)Pyf~Ma@Le@^hMI_K58`@70{!{BiN$mCN4vHwp(gS7vU83`c7o_g9aMP(THI98(WQ zOIy;4M20egn$GI!?ps->jl0`W!oEFN8z+1B=w-xfcZ_?kjzIe71Wup7hg-Wad5&{O{F{mOi>0I zmAAvHWTz)2#;gQkS)8KmggmB;rf%@+P62a(;hI}xCCQ7bjXm0bRv1z$P1wPdIND|L zxejU;r-N3g13qtx0;%?U&mS%oAItolV_DC+tdcfsS$K<*_5^f#!X1Z8i>g&Tbka{cz{PEF zTiSGwymiVNSX{G*npMK&eiN~tQ(UY}HsxJhtfJxinM=K*k+oB>^|QuZ&s@xDhZoo( z9ta6i0vGfR2c$chC+%d6y#@#!xN*@;=%h=UTdd8=iIe^C$@O)XoX|my4 zQF${^Z0Bk({CbbcFuIE)7)oM^FfY+dei^L^CxKRFxpF%(D_A@WoE*mRSFabAC-loR zLz{z~-yLjrKQeU_9F6xEq$Dm;WfzNlNSj#h0^UQ1z;7Dl%`VK6fJT*%ap+U{+xvDu zx87sSgyh}h`T@l*H5SZ;6l3?Dvivv&`)E%f_9{1*wJK+wvD>B6o0^PO-7UM=@2(~_ z3CI$xLWUSQlq2maP%Cs}=Bvff0SjwNi+gpOKh~lIlMiK>*wAj_^cs07MfcAQ#!}7{ zF+Ej${b5YmVpSeoqyL2PP}0- zQJ89~#_XpLdjBasgO>W+PUaH2$WZX^BQXiH9x)4#ca5+8^ac732|?8`t4liMtpaK; z=q)#HCxtn_cfYuwQz`8SM;0566e8&jJBJKkfk0G(_5UI>#N!!|8$?)I8pQN`$Sfd< zPG(k`iaPj~9Jv>i;C)J@asSoXk-mQ05zpOY?`DALBOBpYr*+##8nu|f1QuON+*D?2 zrf()&q{hnZKjM2*b%Wns3Q46wiupl;gtPKN@OSR;d&`t$HfG;g+?Tj%=H~Us)SV1i z=H3O)O>421k=-&50(mFKP9n&BLI_^&#itLFLXNb`U0T4Lt|egD=k1S` zacO{0{Yw7eHKben$M`1a2X{0X2N++Hyq<=iN`$1)sSYOa-yInr7ZgA&8|p%)75#+V_cP~5##o= z)Pdo8j5JEtNjUmuLI+tiyDthb$^!n~Au)}=Ja@Jii1lT9kTh6+Rq*7wYd0}y#`6(Y zZ14`3iU5gwCMT_-oO||7i;~(v>4X7OkN@1+iB(V{3)}C)Z3JefucyF!(V1nCQ@a3K zl&TKIwjX2q{SZf+3xpC*ukD0#K?;H|y2l0rA;mZlPwRF@W=GJnL4WpYuda*@lrMj` zzdS7ER$_lrRS)dP02OT5w7T_PCNQx~U0>d*3aGn{tn?roe$V1KN~vPttm?f!iH2I_ z?4uHKAaG9%T*Q@5Gm``K<|V(D(39sk_R@IL#Ye1}yw)Rh!RM`0S~v?ajpQA*G-sJ8 zcH4FQlO+Q}WA`7*MqTdmBC{L%H?mOmc#^fyJyQn|cZ2 zLS2Fk2nbj_?w-FoOcd>C+%KBOWSGgiRKcsq4S(f+Ur)}JnM$ToA*JHDRj zei+PrkwhstueaiU_;v67{6&mo%)Sh6TvEq4FKX;xaFtJ6C}eF0{0F)?2y)4nQ4hv|?_5TOYy+&Y@rJ zM?KYGaLMFAGVmce5w2Sa%rd1VgLT?G9qwKZu}6hSKp`*2#OTeE$cw)3U(_JDOo1v$ zr>sqZR~6*LOB--RJ-?_(kt8z{a#Sh~J*zS+XpZ=|H5PS_KUAWA@);<1ZC!SZmu@I@ z14ziJG6kC)U>ucAen}OlHNmY{P{}ncH=dM~B&?Gryc$D;6a8%sa5@f1<@|}i0~+^- zAI92#Q+!U6mxzI)x^KWjUVs*}+k|I>bv5c$bd|Y`9zrRg1c?2(x?z*@UgC4nR4F;7kE=pi)ThLHxs|R#j4JKV7PEvwKn~9nA8F^D~1x zckZBfltVUOD+h0EU9Sa2uxcXrFHYeLvkmwDQLjUSdeBl$_vtyED_QwJ>N}`Wt3*WX 
zE_OzeYxK)G{0U(G&V}Ibz5PV()1(`j=s(rlt^Lnj|A(%(jH;^b!bTS*A<_s)cPc5} z4bmV;iMZ+RZV+jtyGx{`yF|LX*+}=MJHF-nyw7{a8Rt915B3;)bFa1Td&YHLbI#|p z$^y!wU2m;w7OPZWz9pd-RFndl%h&`?cOy#Y;S3{E`qYf@>2lZ$-KjDBP-v~|WSxG6 z@?NIkC9xD8ne_zqq;qsmP}hqHPUEjEjQ^kFLNg;fseNxNO^Au=glT#^Cx_5uBeIm-X$& zg=?8NScn814`KQ?`9l2@n3J59_s(&#PIc@*J$34PlOI4`fAZ^jp7+0~0tho?P8R~O zrG^Z9>~3n1z|3pcj5d1eJ(oMH#bA4bZ3!8FXdbWpB+X!zJne85L)03Gc`kIX_j_Vn zhl>21PUlD0Vrshzte?m~3sv^6edtP2CkVRQ@Canz0(Bsfj!26vr5og zKx`)w$r1WGrEcF}c_0~=){RB7tmo7p(mjO~Ny@-n>v)kLVS~;fGEvw<&06-4SBF5y z{meKy;psvgncRkwFYj~&kcHIgmq8=x^Hii|-~s6jXnaoaIy;%Y$ENu~lXloMjT@n( zZdjnsdc}VHos+XG9MIihv!`!zy))A zQ~=ewpi`i2mSkb~^GF=cNEL_?%cpj$YWz54N?kkrO&VWUD0f9(cjvmmjNg9MQ5Ul} zFLh>v$=XAYDTc!Nk`^8y>;YCIR*qMNl)aybj^UzV!Vh0qeO>{69YflasbXK+eYYAB z?l4{(H0C!KrD2JN)FI=yolY-6qHO_SzkHoI97`IHodQY9c+w?9P%@PC3mB8r*EKeEke)X*!9#Ioo0}s3n91GwL_n z)W#05<{lg_&o4Hgj=$qIx3*9mFqj?&>5`xSY0r+#cWdLgoO zq2FIrSDVg{5lzldx}_%kS;%I}rRuToPniATWH#ocWWfL-548 zt6kBypCU}O71(gGyF%EMN+)NSw$s&aC$oGb*i$7{)1^bGjv6L=_>cid6Xh%cTW;#> z-zVxJSOEaw>{JNWhupg3N|oCw>>a^N7Aj`lB0rs90TgnGCWLQIi2#ZSkZ9h??QXG> zrbHG+F$~MRKIn=Ttn_!xG$!hdurFlCACb$cZR=Tp_h;`dZ%vE3fEjoV>Z{W$J@FdD zuTQV5^ogGYmU2u9YG^Fi5wJj5lwvVNl(VTr*EAoy)k0^-eOBl`PA^?1;XznH1)5@o z?7f{c7Vu-5)7I*?xtcn#NQpdiY@V|cvR5VL%6%q)Sf2A@g6w=XopXrSga#+r5Kx)n z-v;w4kL$4*Z%+)MqZwwcU1x<|z_7N&;JhugZ!(fu>AkN+j$vM8dYbS4+@ws`buo7H zU2WAAF=j}iJ@xl7*(+YAb8&WQlhX`NB$avC3s zLJ4YiC9z}G-~h-gzq_M!u!n>pdT%-cK<|T3f3lmm|5~`45Yj2tX^|K9Cm8C_F~s{P z{kb7gPlRI+!+H>oG%Da{ZMErbU5f+{`Ld1xc6;zR+zAx|#FwM$ zAo;gWO&lg&@D?o0F*c_iT%!r6dc*tL@U#!xQQRrrDpbp5|DDeE`Fe=2<-Q{>zbAie zIa0MIM!G=T_E_CI0 z`@EmtOtvr}?Wra`a;A=alK!4feveZ!i-B;aeW`&Nb+(LgBy2S zcip9(`71cdOor4P1ohJsc-->?4cTu#dCoi`EH3?n9BZ-qZ034uvfgQ&wX?QgOm#w< z!5V<@&2Sic#c=H_7#!9JDkK8;r~yG_(jOa}{S6Ax8S=sML1Q47A-l4kCpYAi<`c21 z2A+=P_9t)~B2r3*_>N7;?xp93SlE!rF>(^f0V6 z8R}v-Ewy7u1}G6X^&}&3XJm;!!L90;Gjkrw5%E>1_B@N);M`j*MSy+J5QtCW%g8q= zEWV!t(WGCuM#vxgM@fk=c4-1ZnjghW&#OIz?Q^`8!p?8(p@{cBCV5$0q73#k-KvNH z3d8^a*mCuz?zF5ML-2Y_SM}tJORE-$wgErG2s3YWhBt0Bycdaxnlq>X1rh@A=KGcX zbyVFj@h>T-lg~aAaCs4q)N3e|8|ir{p)70ZsBb{^vQ7l{cF_7>?ZnHi*<}qbiB9rH z_4GEaZ<$|nI;gMv1g73+WjkgzO zSmW^x!6a8%zMOlU%;f=%^;-0yCbDZ&vJAQUef|;A;Zw$2=W+14L}dv#kOWNrNsYQf z6KNFciGULw0)xABwg{p;LWr?G`T}JTVb2U7IqHcfn%`=&p)^sKntZ|D5h@^xf(-;Q z6owc|Gl21bFt6dvf-DV?jhBZKF3C2+Kzq1?6_FB>% z)$Olp6p?SSuNN$s%V<%-$Ofblw-1iFg{*C;Gibz&22i!%F--dmh|W*QO{flR^GwL$ zWG@#PjjMA1_ju$(J26j-SBc>Ii68Yx))-Dv>+iFf!Sd)BJU!w~gZsrKdEqb5ex+hySCCNu zLc-R=k_Y@g;jGUYFlcqZ9e>$SyaZCN-@1BK@&syXho|`u&9>jY+RsPq&M9&~dcmV2 zd^ZO3*i6f?;ej_aF9b5g7R&%&Mk*!yTn3veCWNY!!SeOt`3lzXg#5i> zH`{8V0~1(9J+j7Y!AXMxnrx33wF&OO7ZnB=a3~!82ML36{l`Jy<{ z!U6OUJrCc&A-Rex_D*y5b}nIMN(g{!8?=)Txli6)QpUHoiri4WmW8ZI{}^5Vo5!9* z3u@=OonzYy7)kZ}WJYmez8`HoWF-c%rC8&HQk*1ScO&e>b@MV>AH+IgTamWw&zglT}9eLaC0nMQ(*jftuA7LPGq{<|Dv`|4aEfsc?6JKk%C^%kYMHkl@q% z1vBRAUPBGg@bf_quF3ceExMkCAEF2`hvy&c(nt+`u>V}KmpXGS+Fh9b-;7+yi(vn* z&qdky?7OXL8Yw8I`SU!wY9oYdr0LdAak0m*z3thlIBW1zUHHdJ>tAH;l9hw1N7hM} z-(TBJqW9PA6FvhF{g~*7h;rhcZ|`d}UY6Fl-ay4OY|r{+5^9JV8gU)L*y~8#bwjWwZ_FGT33@z&bBK< z>Z@5cEn!~7EY z>eB4|gH^JU;@_o0%kHy9MQCS|W%svEDEZ!^+84O-=inK6*jzGG8c&h>RilLO96K2KXt z8?)*J*A=XrQ;e(e#hl&+0TL}TfOI<9KSAMH!0XIL4|0AQ$i3@u6@{! 
z2n`23uw=M5gIwz~~1IwFH?M>K`&auRj>9$X| zE@jOf{{1)q`3$tfhtXM8xT0xXPS3OyLsm5_+9&ncJiU~$pcr;_F^G{Zj=`KoxbtbMUX9|*zyy@BLEaM&jMv8q+5qvVh{VSU_(5a2>zEb5`5IA4LcodUc{8V+$I-l z=Ln0X^HyZ@n%X3Ty8d#*YQu;)BY_V_=zKBuYo#fLZ*!fZRQ-ySb$bidhCf2?@D1j zZfdn|g1^SlPJLj^`lzF{&-rHKPSAwr@ZIbmxS~16s*uWrMDxN3Ij(7i9+}g&;qHzX z>RBI1|1Mv51Va_NvB6#!rx#~RZv!7}NN7?VM>4=zyX)U8P?ETV!kxu4EX#2;$ke)g z6HQ2LpEB6jxUFX#XP79~Gt&6%2+ogX@IDMvdsB``v9>;icdO33mnkoo_lG9aw0!p+ zYn{-UyqY~jV##-*H|tUE8ykey?aW$Y*?D%10nnxsJ&-ypzwJint=qgu@=mERrZ2A1 zyvy3L%BI@R?K1!Tz|$m{)YnMu+fwa6RhJ&asps^GFxXJ@;Q<#=QIb#&4PDRZe33SS zoUKIk`Rrzg;QX1NVBRvOs3*Hi`wnV7O?ANLGeGu_ox~v_CYc3yb$@ZOph4m4=;b%kdVk^Nch#&VCWlX@4r(BD>+8lXVujv0~ z%iRO)9T;14rJ6?jJxE17MFmdQM!^IL*IA^-kc0dgVo8+I#aH4R!)bvf?&ummAqTBg zE)|(3HTG!>+OU#Mu4sjc7O}4Zw`?`0?b|hHWNBO~UkWh5b*&$mMHmF`o` zo6lZhH-KRMF}e>3b?Jr!G9OjBePn}FKNSC-wy4!C(I_#*lirkD5Ex0iiU07#^(=$< z!IZpG=29#r&@{Q$9Zt&MKZc)+f5Jlz6E;%+ajq^7vDK!5PxXbGK2zQ%AC+xTSMymJ zf%5@PS7g`l_=MDd>?vWec3HM4Wjv}PNt%f|Pps3OP~5BydQ&g2!n;V8Ll0gpdGj&_ zDcx5v=Ie05$AiE)^}Iv!ufeYFJdeDkHP6xLt6x;_$;NTX#hGxv2*`gopApv8 zS#;QTh$qD#m1j{4c9^$Ju)qs0&?$#s*or}jMiT24+&mV(8p0shJ$*LHX8E!pe%RW~ zNx~9WDw2j&C>Fztu`Qvnp52EARYF}{y~jL;c+!kZ2GdpQqiqCc5|=}8I0kt8hnTuN zF5RX;h+Czddzvswz&?#3(iM2+V` z`<=e$@t$^SF-@x>feXHY-%H08H>2)in`+9@_-U%k@v~q~)B-snhAT4nC$D;f#o>_4 zMh^`qwN^%7HOqSSaOxN*4_p@;dUbww2LrmInS#|XUdP*at)?wi?`*$%a7rk#k@4;v0q1b z7TmVAEXOVvISVuhrG&37gfQ}Oz*Q=k)?}mB&h$5s2+W+wpdk0enj~&&r{)(#3Qp)) zW<->nl=m?Q<_SB%4m$oiSI`c;v`aC6osA#NkG?KX8zF(aXLq+eJWmYsrmAg3$zv$} zO~U1BTxPAKDUW6c?3@7QKNlR;VTX03Uc0DedgW0CttKIzK~i3|euyS_v1 z74%=SS|tg8Z|yYOlr*x{W(QkAliw>+voeLj_>-N?D9U$)JQ?W&ak(J|b+w#DGH;|W z6*zyfm~NmGkb{D zm=1_$HclBc36AgC{0I=zrAyEhqj+b$xjIaZN;h;XG>1Q)DPwp~xF@T%)}Rxa%akU= zp&%#bFGf+oV~pPAEx@kK-``5cJ2wxkw408b4tQEJDDR?N%=m7YZ-$ENYEhP99~LdST{{c*KM#aDY0nS?OZNHzH%9d8bHW@;&9Bqc@5 zJc+5O7*?v7xqik~H**Zm@tamJHI}j`gqi7~S zAHABa2Y71`kn3Ynuz-A*DxdtI!iegmVNE8&g%tEQzaUBA+W?aH*NLD*nno}LK<~24 ztRs}AEOitQp%|k>V>VBvi7X{Ly6v3m;gpqKaQDUeqDms8N*l#&Ncar?%hPJ^qtaBR zvmG;({!lmBwi;|RXAloR@V;!^8pYvUbUhR~^)729ay)_I7X?a?c)WQ0THOzQd)Wz^ z7g^&c8h2&IsMq7ZO}_-IK+{#C`Wj6F;zaSjgTo}*VZwTV&|HW=RViZ8!{Eebj%Q)6 znLWH3$LEEqj4+>xWm%u%s2fCCbL3waHkkC|ra!UXXaABHjUe>|SKCrQlRm{->20h@ zZ2o-f6WuQ+^g|c_%+yy|y!;MaQ}T%Z@i>58`31bvO^CU03QfPpXYK%tvZ+STS$5gz z;2hN)G^3x#kPMn(lhgGI&jj+`BO#8#>XG@gk3AbZQLq)2W zbW>#^E@5^H)lYsLimAIAj-M?Y&Ufr_Q>5aLeiJJh$({z)Ex(;l$KvR) zV|gXyGep8BFBgBx_1UM}nbnwxbBJTC^}6PSljYa>YI*4Gv=muUu9JM^mfV3>(wpgp zlxk{`pO(2Zp@Pi+JGh&DlT|F#GSKTHTkh0;B!ZUXiNQC;)0Uz|Tqm(xo+%x0e$;@- zF5I7ia4bNDf-3upSSLOHg}~Qpni^}<_rwN_D2P<3u|Fer|Ljz*gR{NFs>b|j*Iy#qFJRF58uPA`^*nGkO4Dg~(m)(}w5aH(P{0Tyh`Z}TcQ?1}QTYps zpRg){@ZB>+mPc(bCHo>oV8c)41K!3vYeA{j6hCX^*u%$3~RY@uyW_m}zxO(%s}k7Lc+;Ei#*rbJLo0Y__0ntk8uQ>i*xTALu5NrNEA zv2ktI#HAD6JeK!a4p$=lKQ|?)cp)6A7}|2~vh_^184hh@g-So+mRy-(7TAfO*( zeK=5L9)hBQM=g7;^}7W^tF&tu#e&TzlQ?EBvaAoEi-Z;L|DMSITt*Kg8d*}q|7iDZ zh1bO(YwN*UTHCEM^x>$r!ee@Hf@WGe$%f6_Rj}F~$S)1kM-lGI8_4hTro?7BFeXVU!B5vT#T;C z(=U~91pii9GO|r)DNK!r}m09Jg0+aowV5-?k1MM;}&GbnDwUPRN(; zI*gO_^~k_8bQU^a7?GoioFzMxr9CGX=6BxFSyPF2KqFChX`?)w_5PpN>`?znf6%*F z@U;_{T^2L6TCS_Ed{#*x5FXrZUh@GI`%6^L5;f|=23^^^FryLwvjmj-54}_}BYf~b z%b*?bSTR&Ml8+(a^!g2EjO!rf+zwHAQD0wJGxVvhz3s%Z-skLc-{6S}_%I)II4t5R z_B~z{5%rJW6JRBh&SnZ?q!^%~gv~B)#@7s0~TGc_?&_o&Q(g*x^mhKd4&_F=!ybt3rT-kvCZ%z%5G-7nc@Vj4iJRfy1?KCtM?yXw#q~v!>)&csk?`|!~ zA8s(c9||jYh!ug_0Mx~1VTVwxfIhxcA0$rqN#r*uDmabOiFaaHZO?Mx--{DL?eB!N z^u2czSbbDeGuk3r9e|b!gyG}>(ZqB~3Q-5+JO28&%26cqmxHTXmqVxp62H1*?X!Qs zEBChN{5ty7y)AP3oP47Rp^JszT3xy<;pM<@Y1-XR{WzihFm?~7A-ceIa~zM!ZJY%; zi?W&-sSRbtOY}ObXe%&BeyRR!2@#BiBb28nXj-q1kbMqPnMe80&<*VEe1htHd11E= 
z$wLZpAuq2OMk?IR1giNEv&}Z6+)I?*7=3DSzZjJ{Cid|;xNmW1Irj{L6HyPLjhxD* zjw2P-KdG+1%#4fp*xY%UAltLRb->pa>EBOolTlk@cY_DYzSt-|zyJDr&eGtD37+4Y zs6LqYI!wZp49n#O`UkY}H(@=6=49g{_q;D=!@MXBM-$m&JfE7OR|ES7ja8sK-R2=x zmxk16%3OwQ#am|m?(&aWGLcJ-KYso}k}TC_qhDiN2Nxre$H7E(HRf-`uV>G&)giEr zLHf#PIn5vx=ZvL(BxYa1&o)~ALlv)1=n~G*R|4Q=_6GbefYdGjlG(ACam<#QUi7h@i|55N&ek=T)>)ZWV6u z>DY4l!*O&^WP%B3%a6ZF-IA;M)jLwLuEcDWs@0?Y4Do0|tL13kw5}{Wf@teTv7Rw+ zCQ`IG=3)5tHa?U?-!|gVI&of!L#qzV9v}J>&%*Y9mcQ-2RQ{Q9)oZ<(RF{B+V!-BP(7Bgjs#6I{AcY=@c1=^X5 z2{;{&w_<2`T$W^TTQ7m{T%iR?C;F~S)teED;$c!T>{LTOrBuW95Su5r+vaZbXctcp zqG*!kvw&e@Sj&1Qemr}ZVAf>aOo}FPab#2^YVAV(Ydd$#4{?%>hP;Z^G{fFVbyhLR zlT=9~b<|%!+2setaE2pA{|7PXz%(1eZMNSpY1HE=bHMZ&>5Uq;P{e}Bjslic~abXrj8~Q;9 zT+5HqrJ&<*c@OZA)~fBTmXGkLch4Z@nSVA-Cmwv+$R49+alKpKmj|MiENFUipTXS4 zj|6Uxs}>O5=-aezZ{*|gCA;rowjbleb*(`}^y!yMD@cUZynF+xS~)y+XN53CMs#AM z$^f58Dk=zzCN6+bcze+-I za$PovK}5rm}H73;bkc+bb*eK8-tV$Wd2GpdS~E_6q(19>JIf zWux&>9>0$mR%UV5r(XL^5A5Vg0T+OD{LU*;g>1z{ZFQ;s#fMazL-C%CnSh9QHvj{8 zJzF>5TAWijn93q%V#Xx|j5BMvzY(I7F*arx37kHFe}lWs;UAzE1SYt22;4?qCK5x} zs%L{4k_7w2234y9HB_pQsI_~TSxnXvO_OO&QyC$6F>&kE85Nnu$~42j8%u*Eo+**I zPw{9k^rd|1$ovmbFofu)Q?V&X#8Le+!5>R*{vlPAD!0PdP-g!0W8z5|N|0f)aYq_Q zY+hnU{)EnU!=j7UKVaV{tmlBsNM>ss$fEv`Po|rXh0&#l)tt zl>NO`p^)+gEj)tU$I31L8W6)PdXW^BnY5if;%M0@^fzCQYjmj*MFjl2pX)dWb8}TK zVdunm2LmHp#>87-K2`nFERExtPHzt)6K_kidixs%B3#{H-Ik^@K1P@z`j#>oEW&P>NL$ z+bC%SY9v`84g3$K63f<&u_dvIN__Jd=}{=zU=v}&x-T#WcSS;q-uNhW+VBRhuiZm| zNFAS9q%aoz-E5k1RBwe*CumShS>zeh7RPpiXXwIS$?<#8kj|-{zfE1~xsIVhf6@Y$)usx4oJ1ma4ic2Fq3OwF91Y%Li#SXpg3H(~_ zHa|l|e@XM_Xp>Jj?L*Jt8%Sr_0NY?NIR{11m~^n=cV~&>Pos54pWapaPhl`oo4=AW z+&!RarsknbA!^){%rs6>3zM;~@Mv_v6|As9+G?_V@$G2}M`t&43?)DvkyP)m9rG`(n1M{;}K-FPi*HcIo-nZKn82$HJG zGUoAbrus5VXTtQlck|Pk4F8MQ5RQ;&x~JjZFyd>+(%HrTXcki*ZWtXX)4}6s*kl$ z6n~I=-#>78{cT;`2FQM}p`Qtk_|ISZ_A@;{YgI%9+--<=5vQ{#I2kvGtb;T8eAbJO zmTMozUfB+C4Nh>4Rt*uny1!mz89iOva#t#IW)7J<{>*1QWQrcF4F@tZV@50Ss^;G3 zqK*&RQi-NN`mZ}r7vygHaB+zzHFU_Avr0uu8ctJm-2WGtvlXol85;LFrkSL$dseq$ z7oeK*U2ZbI-}^)F^Yk_(1Rku3_WK5pL()fp+xATqQDVM54nXOQu{2M# zI%1%EKRR2aPLUc)iuwIl6O)`+fMIe0Xp7)Q>lxR_Fi4iiGUbY& zB2@o>ff7NO9WYh(xc&Px-znEVJp!+G2A3|z0Ka^wgb;gSC`cM*43;0F-Sm4HF{P%g{i#R+eDx;>piRjF9|`9W#D*n|<%@W<#hM1lGec zBA;9{8s+6MG@}!fmU+^XS^7T`K}iJZ8HHJ-()QMeIum+;R5>?)lKff6keQkTA_iOt z%c;21pt{>?+_Nqg0Zs*?NOmR#%%jxr2KqD}O!7rjW;cg4-W$j+ic|bCrkfGze4FiW z{{xwJm3oDt+nw)Tchuc(pcteQl*wf{Mio70N3VW%To_UKDs_$lpTtMUH)xp;01I`F-FFq$Y7+@2ehd8q=nd3Hkz8R8502vD9edVaGeqPca@WXaRbS zw*llFWAQ2DG3=4S7E$Q5#G}?_8QQi{_=OQQXjD)rzIz#;OLqRh_@NTd-D?(7B{sEG z*83vl5$M(z;#`C(ci}2}=#x>^g}D9WbY&<%0y+HaH;opP9k&Be@5?rsBXXTVMfa{z zeHE5(yN4tw07ZQx^|k{Ke`*{OP_xm}PZyK+wkyE6C7FXxv~%5*$mB~hh*McOABP$} zqz%*3{$Je-elxd9aD)Xw$6StFM3`3J$vp z6H0#kX^Qo3nXmVGiVU5e@N1!!>ww!{5%&j}*QMe<_w0;SxEyT|jv6YqY&<-|Z{R%^ zA{7ce(5uywaQvl-aZ`D@b&9uwUWv@OYfaOEV06A5>R0rk zkR`g#n!f#YFuLHA`{y}TbKLxjKrg$KNiU%jhIo3BO%J02*a>P3s{V;66o@FYarZd_ zbgsfv4O;YL8M9>g5CFyzurb>ZK@-CZ+yqekN)wo$;C`}s zHdSG^jCklwaI}m7GX?=<11Gr@C!Ib!p(6rRRECww)MX73+vLcGTB^Hn+rAlY{lvlA zC$?sb@$Bq6JkbZ(rXRO{eYoBbCL!#PenDd%0H2^{93jZngBO$KOq9~VyBQQneKQV5 zsOji>w0|R1&1Y0pxt>Q}tEtad?qi=r9fpx?+YWW1d74ls0SU`!9r|3b)o#0xqQys4nx6k^0b(99&iu-G zVna*!(0^ zq~)p{*<173Bz^y#7F}=Aee|%jqqJImM~gi)69MdLV@B>VuZs_e6^k+!C~ClpXZKql z9=snO9$FEEgdT)yJ<4>-nDT@lbfGuv25rte^Q!GK+e_u7)RrpQ2nYxd2;L6};5%Bq zAA}yqCw0rFnG)m~0sw4PAh%+dr7@zcTA)sZy(rd{{%L0ZBq}zpTdhh&N4LM^WTS&^ zDB!edCLh5%pBp46r!1g#+1BVZGy^ZX%u{L2!DzzOj^+IZc!~vW~8z-BKmMiay=tAL}Zikg4>rf+EmCH*g`c};FBc; z$nHDytZ6GkBKR;rCF2(v4}4d!QpiW$n2Q-9qAOMWPeh;Y63e8r91~5p_fLZ7t~59K zm##|DcCdRl12zVdEh8D?vzv6R^@MpH3qWsIE=25K=XUJA$I4YwnUY_?^6SXB^m1#D 
zD-O+FbiZE3mMQn%`Euai2FX}`Eiy`ll0W_oh*vWtOfE$2vSYF+95&%pTqM6adadkr zl}JBRDqpL%L4tkrg|o~N$WDJAz=8t*q~!?sHQ;$aRvTL#4vDp2t=z2EKRuMsww>@( z;e?DNfnX9AAL1|bf)HG$dGaGfwR2Cq5$?iQ*0ky+8&y^uTVbGvgBNs!wcy0$z~+gS z$nIxMb#a|$slScul8F8&F}9;w;%k}h}OI`UhE?9dARZ& z7r7}jxQVM=Uk5h&oFAPVsh=OBAq`_w`g@Pacn%&3xpFX8MUlif)@;0HyJ~Qf^d$g~ zk0BvQnP^!g>~)CR+CFu3h_aScJ9{#HiO>Clm-`TCQ2&d7BvHACueKNO{ubtIr9?}x zC}5f4tU~C>TgCOx>MYcyC!EtraJe_8CE}s^FoWeJa1aAs9ZlsE7D!q_sLH1Q%6jJA z|B8APCTJ9^SeUIh+CDjW$#OMOL+$cw{&k51T5}8zcO%S6dLEr=D?Y8hKKt#<KxJDrzFe;j*#t3}<5%{P*iT?e@gLeYD8+eQm{K>tOHfZDgpL8G6vO0ANt8d{ zFA=B{G^O|Zc5*Uq3acqWSW5gen;VVwR3OC_vr3|AaQDIgSDl02l`g$(xGOEcxVz<3 z$BbI7J2L0g#o7kzdAfNET#xFu-F0U9vet1hr+&0M&S)zuz{;>3{wCtJv$OqlF!PMa{(W!4n-+QZp~Rbk)Wz>Eq*!rJIRnVE}15 zN6AMwQ-tEOaE!m<-*3CnkiS+X@93vj4SxhuuIR!~_vK#Shv3oslStrrn)Tt#*5?#L z-$=Z@r1f;01#3PN1Fj>LAJ>RiZhu_rYPH`>X|uZ)}wSXkUv zAAJ|(9~Y7y3v~PAsewd# z1saw3?*c6jEi%GF$!#jz0m3vpHBewQ+|T}FN>}39vuGYnD->#4&f<6`_YK@$z1oT$ zmgw8wROAUTrW^9a>{9&8G!ON8GzAxq-u!&-rZS5gFf!op7b2|N{74`6W=+@T@lrx$ zZp&1yCK!I~{Je0k%+aMcPuc4t{0R|&J*7OI-tv5uY25cocYRE0Kj}9(Ccpc0MNaio z6a$51p6Jf%b&8?CUq=NC8};d=&(LhmJdsCtT7KPW_6 zM_(VJD=ZYpIso>a`~WQj#}wZx??v@9u(Z)V4>-rh2AqoG(H+GP0RRmTzMG5I_m4^Y z`R}0#0M4IUoQj5u%Ju1r>=f2=>&%)aaTuai8-Kr3N8e14pR8UG0*?*KeoBpkwVC-c zu^x@^{y~9i$yQ#U<=6WE)hYc=+5SO*FuVRNb0{}cK|l2T%k9wjhh4l!V^MqSzt*&G za2l(nhYDORUyOOX3f+_7BU3pX-;Cl(du$sxMOEOup8t`q8Ou$716y`t11lG8JS(nO62P&q&-s<@%}Uaq&@>-21|Yf`e2GzSaQ{b0 z1fBwmLKL9F;jc4#$HcUA(xh~vwpZ<>X1#zIbU?b~T z{&70%Y2&r4Z?W3*ZPeMAzz0u^8iSk8R*}6vLI6}}Oj^F;^c@^Aq7_xeI}y#^_#17A z-J`{eqgWde2J7&|qg`6PdsbI7@_s?g7_2j9U{N4?SLEpQ%cVq8}u)LqJeQtWW$IYg#$K3_=z6WfmbhPMu-BaJ1+T_8-kBu|GD>37H z2=R5}FysX!DF8`!YoQT93`JB7o<&pz-33eU)d#5t`~-G*Dl{|_c;4q=)10uVSXcw> z!q)r!)n(axAyVwsb9c%1%SsWCBWOGzNeQ8NUUo`^3Q($`V08;8iS@s0l4S(JI%G6F zac2c0nKVf6_Z6*phA#g~BZI{56xkr|rt}yY9m_I$>B$pF-Aq^u2QMhrpbY1H!}E{Y zNjvi970U^Z8`tYa3v#KK>s_RL=-*1c_nUo}iY$E37kzyWODcX$=8T?>PxGeW>yzmEK&l_qdCW;D5Te$@G5}QRu}68tB(haqbLcUH7!CQDt`s% zeQeDL6`%N?+NnXyc@y)x#gx7ag+mC6=nP+#OKR-H($?ind9@7-e5NV`kq$@ZlD9p8?{daCnN&cC$-14dAEt)@ z-zX5u?)}4r5cM-L^q+JO7KlJ7^KgNQ4JH4)Fe1_pI^qQ4y~$LKm^`FrWEIbfrTq)^ z1ROm!2*k3(_v=B$mEcEEH8O;LjrRMm6&^}9iBvXNlDT8x^vEIgqjmDrq(EZ?c5Zp_ zo;~{Bq*Rd2Ib!jHV`;UtIN$G|oL?8qPB_T8n;ryvIfMmnH ztfK{1pW_Z@8lUr@3u9UcW93k!)d zm%)aD+eZ89UHhZh(|w&TEag2JBX`L((`?Hc+LPDlq(nDHv(?j=_(#eFNwE+?v^ecs zViw8*7R`(g?e5uS8(>?mN3X0OGk@RU@Z<$M3Y7hDe9Avd)&_{kp{J;fhA zp|AR5c{^S%Ux*E6@ti20ONxg4q6`3H1AvbYxU46{OV{&hWbST7zDJ~_D3z^Qvo#*E z-?Fc`^BfQ>rfMge(vonnah}@0<(qMzJ&#D@ z{@2xM+eV-f3&WwTq~>Ur+#hB6EVX++En|*)vrE|T;uBnN`#$#iww|3V{~FE3%Dtn^ zUpkxe@y``HCV_)9+}HlAtOc(+iw&wog~~FV?%NrkUAQWLFC%vTlr4jWgaS>xXtw{8wu9( zu;lWvql0npha!$5GlHb_!T!+lqt`I=8_;pfY%N6le&(_Mcm)-{2!-BFgE~Lf$0=Ly z>vdS`?ZTj^M~U$3cdzEFm0@>}`k3BlTZ&I zU{jDmPo}iDLH?DX6$?N)t<#4FFfkQf@~a=;*gj}v=VSfaIx0St@I4=_hx4nx47y4h zWLZBVt&`#N185}uN8l*eYAYHo5@Wq11F&K`)KLlw>{gdwO_iKl!8F z0^74>g&mgNM#f3kZSOu@tz3-p*gi&SIq3EPb-MiqkN3l~`o58(m1mXoqkOQ4=LSwk zRE#%>944+f!Hldc@#7j2afH)mAnKdxjauB-%=r9YAy$n`>?D;hcJbJ(p6W$3S9HR^ zYGDONSQr*IYIff=A}TV5Y75MCg`fI80w8H|!SI}5V^;qO@^o<}a!cQS#s^#vjxL=i zWi^_QNSsf3n6%Hm`LJ}Il4fJ&eXMS9{Tiwj5rP=>?gQzn#$gZ-GZPj(oanQ)`*-zT zm&7pQG{LYDz|XOKNMAREM&%pDH>D4MI@-32pd4RgAAH6AoPpy38(QC6OW&K_is7=2 z%v;p>r3bMJBLhEOaG#*7tAT#{|1GGRSxC9qn(L%NSL8dSF_ms>XFk<&JP?(mHeL8{ zPAYx5pXiqH#oV)N1|t%+sYm!5R?v&uVwtK>oUycb8=sw%jfk&-`HcD zk8Vr0)j48xKND5Q5*3y*LRr*%R7wJsMHiLS5c82S%FzV+|NiZAKqDpU@hSO`<{idA z!h}S8NwEzqRDZ;G&kJ9{V_JW4L~+^q zS_vF@zyCwlTZYxu^!mfQg#rbN)8g(@DDF^n?xIq$nJ__EoH$z(D~Ci!LZ8i8}maf?%v@8R^?8`wL%blK_YM{%n8urWUFN0>l#3BGL 
z51JYpB^tEqk4~2Am^hJSV)fM9vveLE*}VPxB>HM;@6x=q00!=f^l(!fwQm56t}50O zj!Xh0W%v)3S~Fq?C{rLO{Otq>Tlia6uHx><*>tC_#$q<$I7_aF7Gmlo%=dqhQw~3K zo8y_Usr?Z`g`-1vlb(ED_ug8DlR@4{#>;^~;vgHe*jLMBzd_|~$&WQ(0mvu`JZ0MR%H7a@6Xbh75smRjdZ|7-gFy8> z0H$f5TRkqA^rw)Fx6Y!{L|dLpS&t1HPd0`TzAZyWgmZc$jr**C!UIM#TI{6=CXJuu zZ>EWv*2Pvb{E~xjKN?Ahl$J#K_NHK{ZI`4( zROjhb>V>#%g+%M#FIzu7^SSk$lEzHf7@}MV=RA$ZdINIw5r!><_f+nF6tp z+*qA%LBUKY;s;1_+MFLoVNbs|N1gCahb93P@T zhXIp8iYiVVf--iCWO>n-uw1F_vNMM$=M2dlUV0#glMlWs(9i>8Osi9ez(pBaNMh8z z_V#eI{>5|N%pyYs0=iD11P5ER>~B5IUoZIFr)NBzVDdjj40O~9%R= zND6S-!9X9AtqpO7@ zf`4yq(7|z*V(0c}*?EfN^YJ!ZDgJ~q7GPrgq8{TjF z&jx=ca-TW$>caW^f3XFkbOY~S04XmsCb!0Ka1Y7iu1Ifr2f~=0^Yp4-B(y}9gN>r{ zs^qe8NL~Lniml)mj7C1_KvdC*j`kKVFYXoG8$l9@1El)26`JblqimV_1~mWO!1H7( zrdof@AjPOthXKnPB2A=dv`s|v5!&@K9>Zh^+NroP=Alf)^$Sa5+1CfQvhT(CL+AS1Zdg1wvOEQJOFcbj>5*Q0pgWa;L=< z)YXH-w}qQI?1#UYuVgnUS4S2gUsk>0Ic&Z|p`zMfpX)0_Lqmx6EA`z4>y3DVh$=|C zun9TEvF}YBted`&k6d#Ty^>7S>-TX#U^1Enj()LRb+m_zYL=P$+}L6H;#%h1ea;)Q zM$Nby(PHG`sjt`|z)@K5Jv%aYeTm3TPilEaW~J>{`bM7%3(Z0A!RFHo?X)igBzV~3kXVx$A@CFJZK>h+g&qr2M7>D2UoLbEZvIm_GFn|kByX|%$8s)R6`k#^k&V8f>v-+F30Q9EdU?u_R z5HNVsDla5B*jKQ(+Uwl$5O>(?I;i{rzhuzWOP_Ahd!}==j0Y~3=@m)#NjIJqzr4zs z0uVP{`hsQbfry9%HUxiAL?w(BsFlxp z3%aSS#g_P|d7|Q(?f;=QprbeRfppCL&AfiD`L?~@*q=5?euH+vd2O~kF8a;M+fp!>XRv4h0 zdomkh>byyf@}WQrsjZe3NZt{UMSki!UOe^7aC5;5^g{|`v$M>m3Z{Ydut3oW25|kiWKAi;C}4~CyUCfaRHc4v5#>UV!*Xd6##r@ar$w z48Owg8%LN1%^wsH{&!SPDii0IZ^V_d;A3;}0|pENBIw>{qnT#$*ntq$O^bhE48~H1FpS z+o0zb7np>CP3f`t*T)Fd%d}iPJHG2B|D|gwwYB`vzfKeNAb99H^6wns}JDD2x)P#O~?#q_M-rW8njx zS5{QeUcL66l$nl<9gVd+>UE*`#y~`j3d*(ryqv|N$1VQPrZ>v|G*#s`JUhpfBM=de zPcP00^9t0fv2A(1{{np}tVtNzGZjtJrv9L}_|-Ap*mC^{ z+>c!>yi2M-TZ}kCgCzKRCHAFAIm`rxj_7Bu;otJJnevys*8eU)vyd_P3u=DS(C0M0esr}*m&Scr{Nybj&# z+-aZdfbz~tosZbehz{eFBQ?}HY<{08Cihm*`|g+#xW!SU2sjeiB&;ZBLmv7J{}+br zteTwh?^OpdZ(f5-^Aya5mZuqfj;6DCgX+}dv(|q4TxT(OZ4uU3^KqlV%hS-S@)W1o zcrKt;f1nm1qGl|Ed8sf`CNs$EjN!*=x#o^ER3DbmG*2Bmewco2BwX~E#Y~75?gli# zlKP~nfgpO1DD70L+8*-?xzEwq5h2h@<%2lzp-Mv72>)?5?2)@5w%cuFnu$~|lMXzPNB{F@68YHs zBKnx$<4S!yLE?qjy^zTiCZJ$Pj)ILw96&lAsH-l({2y#t>ib*87~XHCy`-AMz5kTD zmJmNoX;8&xjeTs~>q_9e$q{?rnt1l^s%B`qIdgD_HP_39BInT5#DW948OMmS@v+_h z>1Yhfz@Kk8<+*%q6gH+2q*qvd#x9!|dcQToe^(Z%Ln7C79*EH2l-q@f-P4^~aeE+k zgx&Bwj6c)78d_>%T{syw4NqPf_JPU(1jgB&G~}u}9=&co-&yrS-)=t1h5(hji7|J9 zwbIz=IZo%j1a@0SAHxD$2SMPBMo6vTB()oZP5>=;-uSu(N@0# zkC^@CGz!>eB#ICico@XT53hOdALj`c9k<-`GM7|0vpd>c=Jb1x4hSCw?@Q0zEV| z^@H7Jl8n6@D?G#=T{<`fHH@Op2K&`#f#8GQ7Q}%;&@bm}_2MOdnd>BDV8lZsFt#$aX`5X;y7=5VU`ta~Tk5_65&Rq%0Eg*vQ{Ox>DX z=Qi^=GjL&SpCq!F2Xgou?w#4_&<^EYYR*(75D3J@XKmVWU-e#)l4ODfs$C)Y?%j@! 
zdULOxrQ{?YIdw=d-frf2vR^v}i9%ADy#l7wT*AjumLbLqUp;8V4C@`fA@DjmA|zPE zNR(|ICp$9^UFp|>#OJTsrrq?`-C^<{GITd-Hh+ehcY}PpF{!&COna|7f_LV!q@73n zP8}ieiVcL!9j}zD7o+^Dq$rb!ZZloE!~-TDn3(Y-*!xx;0UAf^gGbOLGZ+MfQ!S)Q z*+>`O3szf+L1H0zMXR%8ufKVBcZKpZEGmabnq#v#gqFRsVhiOqbB_MdWvEY z_Gu_hZ;Yk247Z`|CXB9Cmeo;6)k(<4MR6!2Z|Hf8%I5S{4CGTlaIRGj_BSarfG5VZ z<8!%T4Q2Zc`DS7+DcbIx@TRW{H0arL{HkOqcg0%<_QrWX*7Hk~2Ti;JT6k}tao0_M z&9_(Roy6>72?I~YGntF#Ip)&ilTg1Y{-K_aqZ*vpkEMvXD2Kh?V0^=(J)^{$CdImn zvK^uiM@I|mz z499juc2Qt<5zaP`msEA$h?RqEnL0;m@VHweJc|K~0qFPa?|0ZgSvyQ_9s=ca0tb*p zp-9)b<%~tzgNm)@6KZzJ_Q6X%F+5)aOIiNI{4|8cQTl>O+;kVaNyPYhFWVD@fu*1oq8!;yTu!R z?SIlx2P$hN6Y+dfE(7jN6@^m3hIqSQ@YXG@-`0*oJ z2JNrhnCaKlPrONNwq!=Y{)e#fT(OMwSosO3!xI_qTGbNYtq)V8N!ahO8Zv=JFWPNz zb`46KXI2py!(U|(7wY47{Vs?JFrQ;023J;pl92{GGw{_;s;f9QRG{q4WigtHwmdEa zpmH%olH;w^`)?{92!cHph(2#CwZmOssZUIfX(N>8vTpJM~^9k?v3K+k|#eY`a( zOe#OmHsNqQ*Z@hGl(6=1H=}h6g8s{^zb_WxhNdw2IC#~-;7GqympyvAAf%c{v-1zg6k;54D&!v z_pB1SkF}LLHkl>1$$*XQzdV7_12#`~ojA5vi_m;hf((S?`jQ7yBaV$5wSZvpMQVgQ z)DdC@^;gOM9S=uzf<=g3UmW|L(8MLWnB}oHDQ)sFAnFo22a^)%_##|=HLw25gm+2b zd5jqwfUkFqvQ)d`qB{@DsXAu)ODyJw#T(^ma`iPpY$#Vd$!GkNJ$T-cAN!WVDgS3` zpcN@VHXa*}W-h^TD$45jWen``GEDD;VD=<*ij-9+TP!w|@=G`H-*Vvp`$}q?J`XQ- zjviLBEPq3zbK2LfNQqLO#|iNU2qCAmJuNT!v8nWbM<9A@tsqJ|U;>ROttfd#rK0BE z-5js^z9L(mIT_f$vN1ep38|k-E$$?i^yK9KXO$qCX2}>#*=m7D>|wTQ-(xYwJP=hD zT)eWtU1NnKCb=dr(;}3K;^9`X%LSy-(ec`^W4_(^9jehAdk+`F$%390nR!=~|5?SA zK~V!i1<^&hcO56Yb7N^6c`+NsljjlnZd`pDeXWJklzqxY+xQmReX6C;JwZQo`81q; zgQaL29F7~1Ra=`JM($+hz5o0FGEuQiUaSH$3S+ynhFiUg+jt*!aPto%)c{!ydELp8 z$0TAmZ>=*qBU%aXot>jaSoTeqLkE%7nMKuzc+A%OQ-lxo%%e^Tt zTPNw;tei$}v-Qu<|8B=os_GXZnyDU#;!z;wS=ZUuFy>ZiD2NG@Bq%MCURxQkG3wjb z>Z!^Wo-~`_0wf;i%Xa$Zv(~Q*Q+2yDeNE?*rMhabC zGjT($cmC@SKz@V;*R{tU^$gkyC|h+WH5L+=k`t2EuSv+5dnaKdRcoDnoV``-)~VS# zcUgM83DAS06eYogjX1zta2bqsFmYar|27Q8!)8tIe!c#opH|Yt$%g05JQ>(ds%k99v{whqRRB+$=$XaFCeu8o%Wogj;&7i(d;9lOVU$cF9l?%G>VUU{ zGR|qBw>$Y>63RvUM@(psUvLf36asO>@eGR?lnLDU0C;PhV=P0X${EHth#AX_iN*2w zCHM`d>Zm=%H90bqRYQg9pHpb6FZH}M16ukQC$jK^K8Aw{81a8J1niR49;gcmm-Vz6 z{mFc55GYNW&3n(cpZYFeQH;xVs9EEv#;Ys0=wQsWT6CR;rP{MLb%Uo*my z0YY&rKNkp4|ABCx_bK&zmeng?lGgx9e{EhNPH4VGVA=h-`diZ?)bzWO6iQme#ztkN(o=XXC zHmQ2{yF4Y(#1?)yhZRn zUfZOE!5<}Fx{WDcwOf;RHk%8rf@{jwXMH+*FZt|K$4B4vH=t0smo-b93@k|j91@np zli|77$-0nd;kx|(Y4d`|yrE(luIQheA2FoS`<%47n{sM=v$@-KA&GFmigd>s@8w7| zs-J8Cox%s`>h#AH6+q4Ar_p}%OI`go{IYa0AIohuBQXFsm6SEYx;WIj(FR+awuAv= zz6O0@z6PIlNZ?6a|CDe2WN%%)&6irlq^LtNE-%Ho4A|6C83cc*(Lq!sFH}12eL_UF z8^%zcaZ;YCE{1@e0uTfBJ1}(HiqE;!b^6)pW$Hid~b*sx% z*J z-)WNRPb-NOUmI^C^KZ74nUc7CaqBB6xTc*SKGMv(vrtt_AiPXbMwMo5dc%+3(tU-$ zAR*j{Y(3}o^T?UO=Vm^qO<#MAqu)R?8yD0;r(`C&0=kTB7~ z_7zFK^j?~gyIl4Vc8C#wHrwBxe!}tftAd!pPk~?Sn;T{h8M}I%(d7X>q+Esq`d2u9 zuYci=i#TQ^)env1W;tNgRgQDN`3}xN)!`D~8*hc&lXVHPkWOi-#WUiMl@Y+Qr;)8o zv0^Nm?09RJPe&eB-=`|3$S54!(J7?oCA&*fI5ikB)Dc(ie!IvZ z`v3(w5X(Sn0&|#iy~IGN7qEZyJ!ILHXE{gDxV7(5AMY(ixrK!9R^fneJ+bDofKvxW&ydtcqYePzPSd{bR%P0$CmFE3P zojk)0908zT{NCg1u5odF8+#7?WY*Cc7V6wG)8aDRz^v)XgC(NI3zU{GP=_J36)mfO z*1lJ>DMDF8REF#G&GC)#F9A!fJQM5T^PX`2uq++!=Bq6i2qXdxh@-Degps=Z1e-K1 zPDHGiP85F=8Eq}wypqaCTyCN7!gv`BY~R|^wGdY&SWT%?9fsApq~aNeIcCf!#N)+i zWcv>m62G*4Jv;F`Qu)0mUxv%&?GDfv2HXymJr~ELr<=RL^K*)Tjaog1t7afVsW1y~ zh!HVkWcz%#Y=U-3OSf9GAX%81>9hCa9_;9M(0_DwL3>4s>7Tg@!F|5Y-A}@W8lHMz zWjVjq3X1+vkBDS0%kK#8TI5!QH=9pJm8Dd7CQ44jm8INe{}G47jbn?C1m|ojZ{@O; zGV@Mbku@Xh-mA@bq+s`M)vB^-l2h>(ocGuznsv*f3v@ie3LIf{gd!dS{i|ST68nR} zTvto)^v}hdS=PuHY?7ol!z{Fv)%DYXD~l0^m6YWd>IpNBN1csRmOKC_9|!PwCu-;r zS~*gYh8oUqIx|PgclkO9FW$WN9vZFjLPK@#xK(nosi_-<;sd-6KHcAG+KvV(V-OHX 
zPWVMB?EEoJ&M!)376GoFwg9@<#ij9(s5YbVIt|L4edgX{6p>Hr=bTYWrx*}qm1cJA zFxhE%S^3?&kTs3tkZAmH(d-(lnq!y@TP{PMhAiBM=*{al;R1|1=h*Hyxf&{#(+ui% z3v^4wPOSuR&<|~y%g4-)q}mDBnX3Ph1JG!3zCpWTNc1KG%wyY^(d}!Cz%7o0m>Kgc z>$eC$^dvirfli3|OvAA(>hJg7uH?tCAPP-Pu32qJKTsvhHR7>LOTy`?zh^`EHj#CE z2sG-zY2e3zyZhQEAaK2#@j+eXhD9vx=f;+DzU=kwBcIQx$&D9ZYoN&Fe;L2fhn6vN z0$W-6tJ1#m*XuUl1%F{0= z8Bx-BBTYDG--#>{(`4hF;xdaTx>kPLf$eLDtiyS`xrVg$`@v-UF2d1Yl@FguqXK3{ zK>zBFluN7(y0`jMRaj>gHkxoH#e5pO*2D!lq(8I+Y$3vqv%O)FJTSZs;b>wpOeAAib3jQnpileD~?diQ|;0PRaSV;-M=;iw)IMvR; zdzC*+mEC86C<+kmAlwPPni^fKvP#s31__|Pc=efz;hHSc2C27RPjbB(6XcYXvABEV zUPP(=`v6mS)}P_bJX;97fX{h zfiovPnRd;frEUSLDn^epaDI03Awmg#aZ#v4|JiK8M2eF01gy6Ll^|_92%{J}n6- z4sfe7q0~f+A~nIpLRd?>#r;Zg)M3%>!4r4a7X1=#5GJgX} z%{ffeqE457dQLSFrDB>-_1yI^F1d8n^zSwek@5btmK8@tnNk;`FQ;1Wm5hYxC-%({IW_$oQmjv_KanCJj_4Hp}^s)STFR5MER8 zvw@goS(7=15un^pQQ&Pe@x#`F5^wrsrM$GY9Qg@*?@DsB|Gs?(@B z3hIUEmQ9Wt`g$QLMzQte>|&a2_;o3xFjUFfrH|rs3h+G@(qdFo8*n6Ooh-(-Q*uhI zyj|871yG!tdz3pH~Ky4dF6D+^99=*Me8F9$FC0yWz!!o5hn7bYozZ3Q_2Sh z1`qdn#z2C}3_=GLoC0Vi8XZ#ym4(yf@|0ARRF##|x!laZH#{;h zG*8$193y;8g!?N-lZet=74h{}q)!VAPO2DBaVGYEH3UFdz zY-GyKRInWeQ(Ze|8DNLjcij*az+xQjs1c;#cMemiUzvLfY^7Iew!=P&T5*852l!yo z1eKIk(zul6y(>Kr3vgYI{w%ioEbj)yEJjPw*3>dD^@PeUhb51hb~-{ujT8rb5gl?_ zda~_*6Vn}rJ=N&qvRE)5FD*Q|#e6O((GxA$;E~Ot07s%9_Ni>AVHjH#qg$N|>4LXU zdIBTjPz?!{I=P)=rao&&VoqXdx|8-Z*V;-J>Hf^4#lY5KguzJ5nZmUb*4v~bJI0)? z;WUr4bNdl|{u+~Vf+t_p3Cedx7`5vh@dj^cE7omArt0!d4=3!HR8gHYyaa{YQSYw^ z9xM;LrBiC*iMO?Qe_$%ZZ>Ks~-#7}f=gYRd#$0C0!2p3w%cBP)4s|!254o({6PKDj zpufIy@e~whYVL>+h!q}p_Wv0`ioePs2)7F+S)-`yrZ1@PXdNi4M^&{5+3d;p?9Zsx) zbJO4B1JHJ+zMoTd%C$;hZdTE56@S3af1RU-l>C!uXfZ(XuvjXTeSL4Gd42S0NRwEcxaw4Y(MiJJ-u`WBGnUzQC$_tCAu!~H^_X_z@s$nuVXar_sp0l7QwZ_ppWM`b zk_n@2%B?!MG_tW=Y&xIYFEgJ!j_Rk%^Rnlvd*4p^V;UKs)%8j_;qLLX7}vNn9`%AB zTUWEu;p8QG*K7aYL*}qgXkR&gjn58XEjPBa=4r8{_*1yuAuPwg|Eh@`|3w&3X3F-0 zS+v_jOjg^#V%px=9kAvbIcgl&R#C!&XCXzM z0a2-MsYphJ#LvgzphN`&FlZ5w5WDB{!Eg8J;N zdcIX-5vrl5SSey7t;;l-9&hmd2TllrL;#?{M zS8f0c)g)DXi52JyeCd!gJ$}7>>)LJf!JV0ENYv`yTSQo)m;L!PJ)4?+=?b%ze+R6m z86T?v$yGzQdS;b$f{qg2$o0~Si5-7GY^IL@(K7scVm0Hkh;7hKMoLZo3)(Dsc{{~x zYA|iSH8@#bh+R%H=`7XxQhYKjok!h!oqop1h*d13F65hx54kp%3*!ZZ z2=*UTT-zlyqvrIaX}S{~dzi!CQq*JQ7SF1y&*=DEMiM@${|GajtT|RSo&>aNvF%M+ zK<(ycpyfbSUTk~OdiM{fk9k7tq)DIgHHj;!`ML%YtKQ_T&K*0)f{oI<9n#$6MRHH& z5`MRH+jw6OxdbBN3JN!5b%4Ro+f_egb)L@mpPvX_F7}kah)B(cZ+|Z*mJ-RB+~t6; z_%jiZXK?jC%6U?rI#n}X1ZQOEo%edW=92o;Q{ktMECs;NN;M-R^>;PvTRysfO7ptE zV4(+MCO1nl*iFtQ&R7(^=iFpvSkf*PeVEnLbd<#8vM#rp_A^JN4#RJ@4K;IV-0EkS z>+YwomppwA>J2nFAoBBdy7fs&%O2Y~s~xb{d-K{7yBcsh1roHnLfxxZuc-5CH*kx# zaCTHr*}9e9&-Vs>8X%Eg7ZO97b@|y@#v@|*@6-+onLRgF_#W@(YLu7Pqe3OjGS`XB zUYll1=zW^`Mn)VQ^FyIK47b|k*^V2BqH=LmxDe3wAc7+>1zt!PhY5JuyICvp8>tLo zpJQT?)2jM8wk^<&R)?QT$S~|-sNRS?9t^3UPn{E{2~V*-8eR;toS#%$f^246@m|<< z1PvB4+nv=H7pukF(nGuAY1GDaCT#w;mFT-C3W!ori!c1h&w5^*l;K$^1BY-K^Qtv7 zN}jplf$&%=jYoZ|u0VE;n#?Fh30@N9gpiyiyKwbhR+@%RQYn?ot*=D#`F12uP|Z5x zB?{szqpMcoa_x6Ml96d%?A6fonM*-m+K4MrDb2uer<&b`Y#mN-UPk%0Z@@(NsjX2& zo_R0AF!_V;M7|8{KxMWu3)&}TWn321Oslul0k(L%Fg(B4L34dFw!bDNs4XQO*Wbhx zXk&!b>D^NlQ(4(j!jho=dd_^>4#R==dOfa~hX{9x#c5_vrVme?`63mQgndfN%Hde7 zgBAG1Yoyq9xB`t01Woijc=OmAqc%HrZ|U*+s>aUXN0KQ08Kq5r|6URmh!F&@qO|dV z4s9qIXOt&;L{s1|z9=JTO|!DaHON)~w7 zydFB8_Ekl_fZAak%lYQAhQ+sFQFEA74QWIkn;kstJ&iCFvB_P zg>|m;!NOnOLw9BM#MA*~)D@@m>H&GD(Sgm964Zxhq3F#p7n;q+X=WNn6r!eLW?;IM zZ-*u7^)(jO5%}TmxSye-L}^6SFfl3JulJkHH_e&%1o8j|oQN_B8_o~!DO)r+4^>E^ zcVI;Yl427JclA|zKf0ISxK{r({3XU@)!-3egg~4o;LO-almJF;cV>4HMus|+8GJ{Q zWEApMt~LiYA~D}!Ha?riKdJA+Q)9lpMwo~;Q8P+Xid^q=hA)c}HS6k5u(#K1zKke2 
zYG7z{RKX~rwl6;=y+7KCz^~R7;mNTQS&1@fh3xy~GAB4U zN;txd>;C!(8RoTgKRxpA*>0<#h}g2?Ym?jDp8+VnkMa4f3BZ@4B8u&GM;Y=kAi%he ziquEc(fT)!f^dI#%@Vk~l76B>N%@`en+0m4J+Ozeuar%81Q`jjIDDJ zjalwDlqFG2r07F1Dq+;CC?vA7yDw#a0>bpO1Y4FrfzUlE`F$WVXRDY_`Q5SgY(>Mx zq@E@RjjH>5L@Xc$1bZLNgHNe{uZj>r7A;M-F7~~!u<&L!XQZj-BHSz><`}4=BNl>j zuCeyTF%^kh1FvoH{R*%exKMiG&72EaM+lq+t$;h5Dz)2zzXlV;j1&M`UkSlc`%8EB zhB(nnN!EOVMShPSy==1aVIzxeZ)MB-d$rVOLrME_$@UzhE=}h$D!!VOVEamhJq~;z zgHW#VTnj{hzYvK{Tma#0Aa%0fWx9_UpnLqKHMT&OZow0?{n?=R(O!q&L49cpn}u5?mw4=>bJ7k)12sEZB= z@ww{VpK81xP4k$U5-#RG`KD2`cULkFVRC_F1b=K?%ihxNyyQTUS2mLFRyGC5m6tzV zfr@m$PIR5Qzb1Hb$Uq=4X3$C`Od51GKGz3S1L=9DitOyQ_7Gj?5!^H$&lA6%rq9e< z=bEP8gNbY-ycyg+SEP|k%ej%QS9>YQAKfQ;pL_0oWU55HpKh2L=$;&CJ|qE*NZR&M zdA`IH`SEx`8~a7bt_F;L|Wx-tF8Lg+HgUbegzj$spJ(2cT;uvWbppoKHG>C z=Zfb{S@RO?vCj#2h)n>gaf~7O$S#rlIke@`9aY{)WD4Q?^ICI@-eSHnCq7fUkeCa) zy!24fVo*&w4FfitZFb697IPh*23S9*+4G7#?dK_r22`W-;pMQ+{nppc+Mn@2BRAm5 zjh<*T%VuYYdwWNpQsZ;IOmOz(c{6tZONiJNAw?tH35(U(-{r|}Y)?|rnagsvYLPpG z#i@zs8x}i5)6*Poxr{x2tE?>D^V)Os0k+7I33`G&UDLTmhRwp=`1kS>I*oZtT|3d* zB!EJnx;vw59<1Ko8HYb7pSPVLHH&Anex0J+_JeS<5(-8rz|u#r87#Tg5-etY;ck)u zgmW~$1B-`AKn%anF$x?s4jDH|_guPn=o?>VF(U?XpnnTOOa-_y6W<~(Tu$$GK7YNK zuHV(t{NQzM2&{aY5KMZJnhF)|i1rG#w5nk+Hqu zTaU}#Pd95tGpH|Y1hg&^-mJQcx8+3?HNL}j==4^bNMt?bcbUkc&;KwE6s0Z$iz-Vs zYkt&J;QiFZ{_1Ihl29f!1D4*Pt17Djnd2tS$fFxG z9HYsTB4H(M#Ul!LkhofP`+PL9x+RkN-^B;{QbeB)&@OqM=M=2>k=p3EdYvt@pwBj2 z90Ti+j-L{Te=uKu{l|_pra;ue`eHv{PH;WE~9jQL&xx-J_l{+0A`?( zaJJq+R@sOZ)#+HC(R?tNBK$0c((}BRp;uN*&u*f99~YCgs~VHOElhOGiW zw35EVg7=}*Z)3+*$-T}_2hMr1 z*{q&*c-%Is+Qku$wSXopF2{5CYikM3E5TfJ03%h0>o26%$41z#o42n-gdMM&YZvXd zrG+m#*IQ*4Sp0&Yo}MzAPF)_)XhWy#l92YbZUJTE^H{TghHy5VesWpNG6h$bq^h;y zwH_}$4~vW)r6qWLRF>{zB(zMDLfP&6%B%TYw9C|PV2}sMhO-ntCw8hjAF0a`jgF8SBQ*D5cDCP$fQ?`^-(8#x6KY-jSs{j+BXUJYk>?I$g1t5-Q2jmR3hp(uZzxdV1FUsin`s|_ zj{&KcMiW0OEj69=wxG%B4og(w==-ty=DfAqwU2>93TLLfC6KGUDrsM@=V_$aqQ99R zfOdR*@x@-ZWbQj+@!7UVH1BJReUnT`{VM-0fAXylNIdL(16vQRvB{$E`fw&CSSB{j z6Ih?JcTJT$)4z&?0fl&)tNrWj7ujnYHPv~v?sV?TS=P~7n{p9k?}FmW&|oBdjAwnOlmqN$kG+ZU#R#yfXM$Nyv{Jk<^2-alRRJIW;=6^F_`AlReRjH_^S+= zfX><$xmWmFcuiH7A=SR^q7PxqXQ0ut8SZfULkl*Bj(v*@0x&p7%mlHWmB>E)iv9AV z3ptJx!iR_ZgnCGo~<7(4B?w) zBioLc;MuJVRTv8-oeTbOsMJVhlxWEBQ%NE26?Fhxx><+6)OOtKdO7Y7!q#BQFhwjN9QZ02rKh>lOP_H{{cu~H!(F6hPjx)j7bdS8tzh0YeV zn&{Z+(Wtua6CS%K+}v5s1PZp@839yqx_Db2{+48Ts?OR^Wn1JewOs?M|A!vFe$?@6 z@oJ=`_Bn6gKhnMk=hEdh9~>1Ucy3WOzOPBMBJqr&S;pq_^u6w@O=xw=*9R2jd1xBD zof1u}aZD!`PwsMgx(`c*gs$e@*4Nu*_Z`2U8q2xy$rSSwTeOQ7l=e$%HTj7xO7qHT z4L@FpSTLAoEVyMPEcpdISnk}6oYuLHVlz}Z+j=uUuzcQN@{MOddTw^=T7Sg07q0k4XVV$6zV-snm)x z=c7M|MYHbitSXrAXr5KZ<4c`6uq1$eYKr(-0rK0A^d0gT4r$WhJd?xvo%Hp>lkfF* zoP%-hSy5|!|U}IDIa@OU&{VMsk$2RV?mG#Mb zNI9i!DMbHW74Om4hbamy#@bIOTvVx^gfS|N_>}H5zR|(XW;cC-Uher zBOPm#wE|hb$=sbai*}db2ldsuikSgFPuqm<$f=WbqxR!? 
zdIPd8DcPTF7pVZx{<{tJE4ESgml!mf&4VgRKkPhG%_^_` z%A9=ckYqtMIA<_!^22xjr(VS)$`8D*E)abnfp^-H^Za|Mi1x>?u>_&45)tI3L(aK9 z_DkR-#_-X=eKl95$d6(#YSJf`IUes*gLB5roAYBG`6>#`Qw?R!n*P;A9ANcElMS%& zIOeWQQL*biM5r(tj-R+y-_wWP61`uzUg}SXfjw7Mo`&h>zuqUO_8tQch*r*u>l9(9 z?a<>YH^88eH>9$PJVb6MQBNVq6eXoM5;XG(4dgMI2`m>#oc~4@32p_#^BzbTq?{B?CGG#-GsY4<-Cm=@mn@5+qyG&SBbb8`?2*e@1 zm2}b*D=wx`c*MC;BoUT=NKy++PG^&+)K~fOhO(BWEva?5U4c$1d(gUMN1D-|Rw8@A z5?>t74=8d7JB}Rk17_@W?^abR$yo>O67p0`0L**G776`kS~0L<*ljtf!DwrR_>s`< z;U4+pUC8~m;{Rjst^cC>qPAg7x}>DL(*Y!2lVJM}$Q$k`uT0%OcV`!uq zkZy*qA>M=E`+e^BZ+Jh?`S2Ta&W^R$wXU`HUTcfrw*lRAh(9Kuo{sKlA!k z+5wyq`mybIdY|Y_%r9kGjd<7-C)v;s0YZ~yBzOfCZ{0~_VUL(UySt*e5lBGL^1MDxb7wymYV0wX&xy>8Js2vop7BjDrFd`)`ZT@$e=$D$YS5wX7=^C{aOMJ zYlsEo#j(=iXz|KUc}VK1=b+A9D!cV7n#4P|>n7#?!4qK;a(P)|{RvgUHiake{30u( zC_JBGTY;8hUd^T!K2-jKgCj__)HXO%x@yzS$9`LXsTJ{hq#-u- z1mtOxgh+PhWs0X8E;$ zGdR=!R)-19)cN@_t|&TCmIh>_@NnZzF+uDbbY$3d%rp)LYA1nzMep6E5E!;O)o#4E}ZQ+v(x(i zQ?vUgw>{v#%nRGvDd*We<>rWNLG`TWpRrOeX2%oF6pNGad^fP9UI>Aez6BB%)g^c= za)b*NZ;TAU5b0idoI<|2vn{1W$m8$rw=IOlj@U+}IW-bC8K03AFU{2R+gmn%{hovU<>4?cCEIW*M1_4B6}O1rjp|t0Q=-ihDTKwn<9?hqb8aUBnU+!B%jhnSnw#p!uz~N&F5@a z$8o-P_YCUT6oA;wzFpm4DsCF}*b0?gysD&U^tnE@YazETJ3dwsNPXSWn+h4?aJ=o9 z8k(1nrfOYTRfCID`|dTVHrwr17>U%}$9gPQ>qweT#~>0}DOfe-c|f^m;veO^a$3eT z$x==r>PR!>vtOI zVKiFCs&Apx4_yiL0h6hp@QFyC9ets{Wi-sy52stkC}6=bjTC&7xIPU<4;3qTYld;GfaTq~0|K%c z6$8)|@9y5CKWyU7C2qFgXBm#hL?^$(85=j;hS2^Q_WWQ8Iz_;{tcJScL0H>p5Le=N zj%Bzj(bN>et_+nDfWbp9>VKpkgF3d|?aY(9dTcW_+wOcOzAGlG{1F0xA>dUzCAYd> zl=}dO*0--LQ~q3;?;duk`R-;4%%*>gRO~0+%(vHdvl%H+GRZ8s#`GecC`XU}wmkhL zyqK-HptyK|Fgi~aH_184y11AnJeNJbXvj+0=*Qb*el+abtLc>1eW z{QjkCoaJjW*whHn#NCS4eII20$%rkS%<2XGa1pk^XZu(6H_kC!r1k^^+>@P9skX37b4qt-e0upgh00aSpMd$TOEInWNLIE}1${cGD@6#9LpU!L*oogC_NwQlWzi6B>8)9S zN0t{x{H|m3fUV(D>nf_+n~T*hr{XhP@q4(V@h47CNB!B?TQH4+Nnw)ateGFk3Qzu6 z;V{~lfE|8wl(58gGo{l4S!nd^#6CtMgqAz8=E>f}$fNtKWR4 zbVUBwYAV<|+)sUt+7)P$o=KA3&GK_sTzOZemYt=Xd}q^s16dHKz8~X_x!+3rM(k^T zIC^Jz(imJ~dyWuyJ^YP7Qhrzf8~@@;AU5{r=%wFl`ZNOID&cAA<741;wl zKAZC)+zrUQD<|_R>2 zkT)$g&L}?7av1h&D+jVMSl`X7Dj!K9%4zcYIAcuu?&oQWarQkWB1t&lOy>Fw0Usq1 z3Iq4|@CHKljCx8|R0L+IIcp#obp@t3@5;A!Xg_N?`Bti5g8{PkMGj*JTkTTNBf6ZAj>!LfPi{Wg#WtZL))?# ze{1`*O^(8Eoq3Yd0-Br~WhnOkcpa7Ytbn0F4;e~eej6|K6_bfk(dJa%xUJT`FbUxE zuZu`HfCa?PUfgVfNbCC5bHjb-vOz>r9?9J*_|2yw8IwE9U~_d-jXHacp0nu7AGf~m zv_AL9%KJ+M^8P~QBDJ|30PN%+CSq3Ke^E z52ga?OtVQo4?=NZHQBXpa-cN)?O4izbZtQs^D!@BANiW=)9s1X2JySo=n9{a+mpX{ zjrs*qWN-F5e^uO^+^Q}5tkPe6bxY;FKR!y3i^=9IP|Kz|TROY)2?O)9d1}Cz+uC0f ziwW^sk}wHVS`4Qz?zCCFAvYZlu{EA;cAjr?EVFXrzs$Hw8Oh-HncEhU(b&4q2-Re= zi;sLo7Zh3&o}NiSvyyB-uWo|r(ra__TibHb)!2wSC^S%dETR4TGH;c)*w9}xdb)b5 zxt{G0y0wI0+>#6(=OhBt@D7TQ_ZUbtznf*U5i?4@d_Rr0+od)?WPZ`+-4(81$;2Z> zTgv*UFlEt$lGbdm2vfsUs(GJhg5Yu`m4hZhfo7n{l}(JhBQ?4woF zbkD1OWoQpo9v&taT^*lI$q;d8z!!?<0}lA=)!NT?(B|G`+0;D}YnuBTw~$dTG~3QG zIf`w_NY_&twxKJJVLh9Sms`LD)>3b9Lk?qdrSmwCv&_du?IZwgMKBxgW;OBd4-v19 z=Hxk??9P_x8`U*m+gCK_?$}+dU(@m1h}S7$^GQFV64CbBk?{<0oBXlpBGRNU{s!VU z;UlL3e_E#1Ib>a)Va-O(`#0Wk(Pww-6@7JpZu=g}s>(zS(-n&la3rs@ki7Ag&oR;H zlQI!tm&Cu8VGLAUT0Yloz59n7h~XT+oX2MX@tR4{jnlfVicCa48`cbpA$4+&k5ggG zOH>)y9?ds#U<-nShEg|dihpH%ykf%sIZIEAiS4ingphN~4j?(<(+Ku-9r}oL>w05-h(NsI6;q^%iIKOM?H4 z+4EXa7Y0O9KUVgaNMCN;OI;J|%wE4p7@+g@`yD>@=jE8ypDH`Lal(TMwjm+Gb-P^Kc9RD9C|KhNw*Mu`y0WVXCX)y6)&x#n&`f!uZe&cdr3vP}w1cFfv9@QwpRT^M#The+N?A z4bUXTj<5LV&%0kwYba>BJtX=w2MU=z&hAz{5J9vE5e~@xpI2XhZ^^)U>rM0;#X4x3 zgR$GC@Q%$U3AS<5fN+T87r&lyRv>KWwEQ#>`!VC3($%j~D!|FNMcF;kzfOGde zxO;`uy9*jlAr07R2IgY$p_4%VNC4UOQ?rbJAC}PYd5WEE*I&nnVL(*&_~F}Rhy4Ey zcZ#<`(RHPpD2rR7Zw}*cWJ3kywO=mmHz(=5;F~X9>>17se%FP~F%)8-|3E4|mZ*&S 
zg+JEZ)C~*D*c$wQvbpyyXO*swD3YB^MP-5}P~)_nZCcf7)&8XCEd?v|gNm~caeU|sEf$HP zu-kFX$%~2+(pPd_q0Ze=u9ugc{0GqT=8bWHo)zj;K`IVRM?@?)_BuS4Zf4H37_P-6 zPW9-%bd=W)-pszEOy-nYP0@PYRCpg@A@YsXfII>LqPWleLRRb|F25= zr-LLNwv1|Wc3aMpO=%7F>;wP&dg>{ zZCJ2`pqOSoov2b12OxAGO-Ka%>1fA-LnG#9W6MDClKKCWKMWLy$a~HIB;lm(-2CE3 zpzZx9;8G{rz}S!T1%_>&$7(Ws=GUGi27;_d0p&Wm!_SC<1XnS12xUz&;vk>PLjqUE zevtA;!jBEq>!k*h&+M@W;cEmg5&!rq!{U3IRDNlz3egjLl1tlPecrHz2?NgJ?9=5)E5O(>cU8O+y|K$6-eiBTGd#%68B- ztZ}}ox(Ky8pG;9{{%cU6)bpOBFQ=U_#sJjwa+8Cq`S+XmgLf@vQ_l|iPuje;_uK9h zj^29?6J5NkUopzyRiUd%JRjKpE0q6pkSL!hG-p8Y=_$GbBA4*aI9;xiR0UHVB93@FyJdN3Z&FoB4< zJ51wk`E?4nY`Wlb8fn_ty!$JEadVq=L43b6a=;F>QRh7i(ZJ|kIv%v{J4#X%M z%|peKe~)Qu6pjP-hN_}Tpx($bigjuu`fSQ%U=`>$xozmG~F|2`LS>UpZjB=oACRNVWr zPmSuv?PFz%@BQ3me_k^W>a-f|$~SW*-CdIRpD%PmvUx*8Lwgt3y8y>jH)ipfOxH_R z1IDWUWvTFk77T~_{hbIdalezcT8R3KwyW;=;(>HXs|UdfC1`rvzf|vk#)9&K0xz<` zA<&P*XUS*1x9!Gv4Apk&dOpNf+wSx3S?=t#f#&qj)7SHnyk=eg&KmMAJQ zpx!8B0L*x~pq->7;QI2Atd^!2F^{hJrIzcSD%>yt{dJh%QtcLej%9eMjm}4+Nr6IB z{RNZYq2-tTt_*i)v$eqJ$naIOd=ES|%c&VT(MY_PwHiX1gg9B?hi-Q`V*!@BovbcMo4=@HVOhX)mn6*s1{s1Bsc z16?s0=Y!^v&clMHgQ}waib{$yk(hGv3LrTZ+MZ|3)-(Ft&W}-Cr>as_f-$=EI{d&+E;(8gINW)~=ZEYfMkL zE=14q9T9_{eHG;Loq~AFi!<1}h`@ZZ>YDvwyg`Ea30(}%;Q9_B<{i*)4z#Gz@hb;FY!h81G=6%7dg>! z5y`5%B`LP2Mat#=5|1Hd$kc9}czaDi|4`}X-y_lV*}#T?5iQ=48>l;B{CVUI4q0c< zXh1dnv=g)nuUmY7-p(wW&2Kh0B`56EV`%(jGt#BNX_)i`BQYrpODA~pbGo7j!P{Zd zS0CA!bN$MeHd;?6^*4F?pBCW#b^V`KZtl+yMBf%48ysFDTP=&AvyxlS5({#4^V|J% zY0znzWE2eOEXqy$y82X`U&$m`oeHOZOC3}E7`)b_Jow{DLjwq_LEV(N-CyF}BgWdt zW&YUk@1vM~bfdPMo0;OptZF*kn%srS8yB~SW90|M6NQVyw_+on!^l?DzK_w1I_`h_ zd+&_*#?FjyYT}i8p1-mx$|vLbzND5cj)&a7IQYD_7vjdr4U{9!bc$wAii9)s*4pI9 zg6kb14ObQO;x64OcQ+wXZk9W%Zm6_@$$E-sJ+)Q6zQ^BY>cp?o(!4=$QhM|)bbaSftGD(Cui=DA+{wH+fJJb}7hk2~VdA9D?pWtvF%bow z?)EV+*!A?sUHRcupV>V%Lt1QWmZGFlDWx)OSZHui@^D$5AvL?O=vhU{CTG2{qh|T& ze#Q&sqK{{x?(8_(H>XMSwTTCxuQ+MDc8EqQjE=nLITy}G>fo2%BUvRMzsp-mJjh&P z&T9V#=HW|)yq=mbPtjv7s zN;upwrqP$h!pL`vz6G4A)y;rOgmO0B^S%y|vSCzZg%XdyEso?~R8^YY@c+uQETiqCF)Q2RHX^z08XSZ2e&tbe=9^-W#znIxCPv3A zg(kBrftBQC&1IB}uo>y+UdhVz4q(sWgB3`VqLatRIOq9>_w{OqaD+M8CANNyf^uCR zfB7(qxVA1`p`p}xsU!#{17qYs3Dt9;JYHaifyVJ5LcQ~z1+@P<;D4IApv0I@Uo%?3 zmzb!;R+MFO&2$kWfmAGcXY-rwP@R5(-WR29W3)r5f8YS`J--CV~|> z0f7DnmdVtHKp!=;bkl&F1I&n(RhIK`n zZP@;F;a+?Wl{Mc9J7ue!BjdvuxjfT(nGZ(+6*tFZMv`X=E%@bvsMcW6%7flM*FT=6br=jPI3_@#^t{YCS7b5$fX`H=)L4PFMtk2O=d2j7>x4=X znT|8)Dq9dnnNW%oD@$=rVCJ0yc-?6#NG<e-xA2ObR+LTSU2vPi13LwuO#|%8OW@R@+2!>EufFfN{VjnF} zIS1km<=}op?mI_%k)Dw*iA;4}5+}#$+QuFE;O-@uQQK0%$3g|79f>%}~ek&s9OysO9V8_9083 zGGC^k!%S(wHR4Am6}?4x}G* zVj&bZniJ=dG3MhkLlyH%*Nh$%(k2lz=Ucx@=MEEAmnJQ5e6p+X73i!)DhlWn&)nlYMm@8y5GRN*u7;xfmo&1i9z~o=&n>Kub9x~$NUh@zh zj}03KjuIKZoszOu@=`#0R=9Tzlgf)op|K{t05L@4a*y9d!1BbJ`s0;~EEv}tCKhXe zlUt8330o@6RP|&KNo*+f`?2lz>PpdlG++aK@4T*=i6iAJ%P{%1nBtK#5?VgGj zCMQLkd(D)d?J`S|%rFE=c4JX3Zj}}suf!JcO0ww1B>sNrk7&8f&{ZsoT&}FY0{dO( zj)DYl4lU{rQsJ7^lX|wjCM%+#+G_mVN#P>EM|RTdI>j*C+nmo+VKS6O9p^Taf=l)m z$Uk88Z^Tffg7r#->EmTH0p`zN!tL^XD> zsw6GPG%!XHgDsF5jDkWMn)`&yRY`G~#U8B)ir4W@3B9u_^3fC2rvZ<$N0^~}ssYYO zqtQ(!`7H&$Jwf;T9%V6Nb-9vFJ^`7FR&_ro%LME6yYKrKNV|Ci?xl(bn>m?V@H;RY z>9-Wgga>((GQN=wnenmu_4l=h0y`^oGci?yRA6s9Z*RxWX*^SzPQn>aFj*CDJ~72`G5_RWh3vTOO@bTS|eG zf|Esv=85%CSX|K@8vikC}E2#TD^rBHNz=?dg7(a@`T)l)=7z7d72AEQsOOTcs1aTU?!6&RBU z0VAlAy?=tdd6W$v0C5%xo}GRtcc}rkmnhY_)9v%FBJW%3zPjHRtWGYQBKR?TPm$QL zDvmZvpz7798=*T2<5A1hVEg&#RhivfC9-%%20+XFRg%hcr9mEf>x~Rx10OsoVK57A z4R`>UK(b_ORwx#X4~QeBy>n5}1*0)S?S!qVL`pkox9e`l@sjOPz1?|UVy#`ec(?!d z|87lIV2wsgH#BB3q0%Uxqb%9jNTf}Vijr|Li1vL8#eilLl~ejmzf~K!p(j5+7_3rp zC}rmrqzPs2x-^DEIB3cXIg+lB|r- 
zzsZ5XM3oTau5o3XafCA&3eNX+P)_Okz*G=&!YFUVL2(#LMepJ5AkMaROl0SB#t)M; z;TJ@~Kkesh?c2m(qFvrTf>|Q-;|F*&1z$kyb*>2-31h>7*~pg!00qS#PwIU#l+yk> z(w~4(N+`!k6HOY;Twknh{Q@vag^5)@wRm(pf?&lVJ!PPp2jWBilk4M6*eM20Q<6^d zl(!Shij1oHnjv4~=fTsxx%-(9A@3z6(K>4qLwi~Y3udbddK$88BORP_)x|s>^q>8m zD@v8te%OvZbYxl}Bie8CHvKr99v)BQxg z8JCqCKnzrIAZ|>k4x-}V8zDt&9O=)}Lbr{fLc2s7zrI^F-CRI<;8g8}`N5)i+gALgHj!+!p)3{kSEoG)F& zBlCjZ&FT}{;s{{UUOz)M{+vzrA1OBjNibP}3^bo!M;GTIgYcfw_l!Oq(?)pe zsduW3Wf&#-ll6m(eJP!QLeS^9&6tTYB6M)jqfz-<+tC3d(7hHeqISo=q+*M+nEhR z@hjhIZGVEQO(kOFSIesJ{uvHa17hcE1FR?Ey((=e;uT>D%#`AHSD8pq8p33CtsM8KF7v)gP1g z#s>)4y%%3MO{o6p$bA?ty>}-9R5)-Bqz`Hzi@;)_zpZPn33Eq5sj(gCWzwOZ=T5{^ zAHeC8X}=4&zuQ@2R?Ga85S4#kvxL`?7>xIb-DzcJd$~dSiZISv4m-Wkw0AYn1(Wxl z&|QDqI67N2o`B->rB(NG9?=1{YW7W0Yg>K5*m3svilwnRjRwz7L9e(kTvU8#XOuBU z!VaK|AJ0*07@@+)iuUV1`e5jxpF=yc_$NR;bWqQb`D+VY%a3ejK6$TO$+9a4a=F0TGs{ir?d8A0^@+Z^& zZeqfZ!mXbXtOQkon(Yw^@LsqNCt-4T{Vh#g6|Ce=1SgvhA_o`B{2=x_Nd(Vvw?|$@ zMt*86cZMVG3Dvbkq*kX+oC5h0)Dh?Rf#_GO#>HZwL`ioj=!o%_WbKy)nwpb_AxKPmkWM3`{~>UVkXC_ z62Bd8a~-tlYGNrE6^x{PCYmaA4j?73J*;x<0qgYIetxq~Wg?Wir3P$~IhMH#WF}w# z5I@q-I}Xpj9V!^{UOrY9fBpL9Y2AdtDD*eP%?%sTtfk&sa=vo&ao(}Ji7DL8tbBbXmQ9Iu{a8q--PgBm>3mjf-f}{Tl&b93S7a(%CMZ5&C0P&lpr}wX_to=iBmeZ|!URwrL19xg9D83Ie5&T1V z^>-`G#~d-4iLw9L6S1c@SY^+?b#mqX2;iKY*(2xOOdPa%O|p!uw~FuNGo0SrzA3ko z>OSanyual(Wsc!!@ZOW94}SW%)|z~PC-NXl_q(zrv!N2-3!CWRbSZxR-iv*)_kL`j z@>fplkGd1a!RxnMNC&?ueH$F;w=$0$)`22uDS zN)s*Uo4BBt*V2LXWK!WL)F>+cCIRm3{PHb4_U+91F{|H4B5!5TddKw8;<8i7(Q%im zZ~ww}-J{cZKFu6{iFOahK~(gOk3uNqvqgQ+KktVG^L5g`v3r+A>r&q86>P5$uzix# z^~;+t{!6Z^EfzGc7U5^imF$KeUsZmLd>tqckZNbq(gB32n)Mj zqICY9kZ}x`%S@ooxcz=EEnT$nJQFgepz*vnikcV!zVQWQDiM+vsX1--ze9E!tmpY=?xxP<=XrYM()_TE!|CLREb$#>2_fhuJChq&A zFro}9Gv6BbGiFIznNo4|ppK#7bU{xkh8JUc5zjdTPcEWD8ZRQYz@u@Mrt<8sK6Lp@ zcEo;qr<}5a`P6Pc+>P~%L@W@87>%1*CzzKF7{+D zc;Lfmx$O@}909T}{L^BEKif-fOWUh%?HRre^q*9vhq7FLnWsIPKY!*UnY~1Eh~zZ> zjX*4aoUzbbG%79jI1rUYr&TFRm(Br#Bd;~{%})bT`8*$oJQfvt4CP4zhE-(`RHFVW z_K!WA7NRap0C%0>*dqyGAS~9-oOd%I`Z%;(bnqB7wE&uEK7aRpq;}uMFnr1K8{pRX zM~}a};Blg*0PF;|x$aO@l*vS5oWfnGH*JRGZOG`y!@f{Ssh4s$;ibsquqDgh=XU3Z z$6;y7ZCtP_AmlT0j&72wqzJ__FWO+5s4aWPQv3@~Y8Oo&rCI0=PY_;kVu8JWGH|zo zeOra`Z^!Gsne9gxH83uqfBO8&)~U7om?53f(tckFU0F9BY8^H%Zyw;8W}7#Ufjb+; zh>3U6NbJ?b*))+_2Zu(QSYFW>j@<3?J3)$Yn=;B_yuaI$EI{bkoE)f1612>a)W23;pJ>-ds>%ddeO@?y7j*+)wZ6md1{!5Ojaupm(Nm<)Om6_4FEa0I5 zOJuqM-NjBQN}e8lic~$;yoUb7MVutXIiP8SXvvzcYeIZ zo6Yo}KQQ~P$LZ=R>(s$|f{~fUF!f97*C^M(wxR^tsu44J-?>5h*uMzFXv)a7zbn-U zA3*nk=kW1=FHRb|4l&n-#)!RGZ19^_oxBB)xReTa6$h)qLO3(z5n?uaFy7@ zbNKZCe(2vs{NFkHZyV?TKkFz^k$|R2Q){yMXl}TtM~=3u2FA^v#S{EmlH04ghU2_D zbiw4=GfzICnmG;Rkr$v5RfFkTBM!68oabjdS|c~x-IF!wUWhF(hqGeK72NyjHZdU5 z3WS_nP~HHFq+3u~B$QUD1+-A*3EA6S30o~t<<8k2)p9#K<9m#93Y47Um$!|!0-)E}wGxz3i^3c4t=-Fvx z+60NWuaTO~Q^(}w26h~&(Bl>)t1yH0Y#xk2&+(){2hrfDm?BjHNi~*~6f+@5iRFqB zuU+Z>MJ{r^9lZ|~sZA+2IvWJOa}N{u{^BotE_MF+RQSN?(Fo9e1Fxk{m|sI~Ubyu9 z;e97m+~|B_ozHLwm^OwkJ6ai4w|z&QD);9oG)Pdah{L@DGf_z3>FT1R`1{t1cz1fp zqtPZ7;otfX132G0u7X1vuRA!u)p52UmJe>n@f$i{v=X2_y_SPGz^iVTLy`^s_RBmL z?hLtiK$X_z_DTDT)xL1KXiF4{T9zf9+4CI_mHtFU+3ukI)}0{%_iGE$?B;$G zaP;D@{UzjL3IFNjyJR)q8DjhKMBTR77WhLn_$dzfo;A9C+ZbvK&LcyKCGu_h}GZv2cAs)*AgGerw|#3KBin;qzA?Fx_OL8(45r(In%k7FyI zRH^QnLGyc>KtVl7GXzrvUbR)APYmxW9F1Tbz%AE3qbS2b>GRgFv-60l8ib9stbQ}L zZ)(&y3VhW~(6a#i`f@IY|RbW#N@oQQS4*hsgXyMVCb=rJ2eX~+2Pwcl3d-&)q z<}7Bc5FjW5TkSU~JYeU3ctisAqC0Y$1b+X!&Y{`*(M5$)MWt-eL7$<9A@b6c-HI4R zBADH;Zqh4O(P*n+MdJMNZ%jhUzhQy7#~BUPjlwwxJ3_pU_t$iKpnYO*-z?;Egsp(K z*5oJ$5WAf^xQ+wVHkP<_+kdgnV$$W{Lv~8`w}CSpNcT6WIgr3>ZD$84&EqfXdGhXJ z9M$CW+)GVMc~KYT))UGWgBeu%EyoL+$kJ)*E#UW2Zf+sCb!({7!8jDJy+2pffScEY 
z@VW_l&#rrSwoRU()PMgJTwI*);f@gUTyga>ReIejSw&Js^3GL)`rnp0emUz2aVXekcuFP@mYpwso{e4k4*8rzTx2&Oe&^o>4oxA$|jj8*dll z5ZD6TFfm)@sV}b<3ctuFhrRQtTDK)JaOv!zQeE3F>wJ##H|_wA#oi*+xf)ZZpztz0 z9MkV;hQ6`_QGeTXgemH>F|2gw>b0A^FMfN});$peInZBe&6MA0{-2?t2y3=hfK2#9 zMtr%jJ=luNg~;w3^X~Xe>imF(yg9N^>x(GG z3#)m|c{%U*0B`F8XDQt1$+zIZSVr}M?!#yM*uT|XtlVOPV|h6M2&kQ}U=hExu-4_c z16-bkwp{=Z8AyqzTYrva`mUYgnr)CXKxiS7#vSQ6TjqeOP2B!88^|9t#`<*I7zLv; z%u=zZo5JJL5Nwq1ke(=eH=BoPXd$Wvk6zV5q_V!79m`yF z>KHF3uPIhRVEatgq{q+ujrxp6Q+UQ7wd_gAmYCqkn#x;V;-mnec?Z+JbyIg2fe1gPnU|$E?7QGp*LiKY}1I;^HSt+2E16a1&6HN*-JNyzJfL>W#(0DHH zcM%fE$*iIzTA`lj)i$82LeK1+cif z&CkTu#AH(kOIxg=c;77fKT8rxS#l8U0|=%0W&umc4h7|n^W-G&i|hOKHiNMlyq)D+ zR$!pwK*!A9qFMTVOhy8AT=!f0?QWu`^$bbVo!3tzeFDt5&A0(7>bi9Z(K-mBmp8xW z2{Di1@(^!6DtKm(NPp$ndU@L1(B|tc>s`75m=-^8cSBG^ga|VW8&|XE@+JYX!F#}b z9L#zFMhC!q_$`Hpd^S+3T8+E>t*-O~Bm!s^)jJ%19007u>65Wy^j9L>;&pkIU(pdQ zz{E_)RBV|YTVAye1eQEyWK1}?IbR7Z5k6Rs=&x`;I0F=YD-6Jgjls+$9#Y?K4Yib( z7R&a{O*cqn%XH@bu87iV#V6FRnXeJ3=hWc{PXJvHSQ&`cQDGNaSsq(=(bM{do%J`x zFwi-6e2aHti%Od7Cftm>&q1X^f?Ij(G8mgWs~^&I+^=;&+3E(ANr4wuf9^246KL`(~^VL7Np?BCYQcuma@bcH@zOkmkW!FHq zhb#I@vaG-Z$Sy2}L2)SUZ$|u7N(Duat5N5p>054hBLIjOyefIhDh9|6?lN+#3@8%8 zNMl{fozr#2 zo_2CA05&)~@Vh=hABxzx`mf%!Ec+o?^ z7yyL%EsAIV5%dC0;0fMD^7iVC8?Axfh-?&-_`wa-+4?D8qr|`U8{;!{j zDru?9jiP{PmIXb0ly!4A<$jC4t>jho)rXS8>2lX3ZU+M7fnsewcSO|SNN`GJ|?H39Vj z)YRQk1D#h{{EeeSJa_YPvGqa7I^?3xXZXt#qJt0}!#AtV-!fmfto3{&HXD9e$wU_m zZEJU^t&{G>vNxyPuXl1zsz(#ufDF4~C0}SKV2qeN^-_82J_44;zJu9DOiI-DXfw;zc)ax=n$A9>iH58UnCgzth`nw->|UsjDTI`}nmbosL-u^8_ye zSB_eyFf02a%>#u1TvuI=<*#zSi=hn0sW-gHVn9IUhNEmSUBG4-!WE?Q)x_@5Y31su zh(4`!_1BI#tEns2*S7=1B`NScsR&D=nv25ZpZq0maJ}lTCd1os;-N;hZMNdmu@D}J z2`7C83$Mf8tL@C@bp(?@MFozJ&S`@o|KT~0b?y}vr) z*tt;=><47@N!i?c63(XSQJrsFl_k zN)>{?d0$x!R8w$oQZ`-tC?@8y5p<)=eYs!>coVKS%YX%;J=;}xnfd!zWFndGG0=Y< z044AD*^Pnf(f32QLx2&rUTVZ`w3F zl%h9>GA=TZ(ej-7$6z>|N?$Y8N#tY3G&bhuy*;etNHGi$^#kKH<9+`4OX9v|6Q+#I z42sWlpw%saJt`Z`Y>DxTd)G5X2;Y}%4sv*%b00Zn@O09dILLwuqkTjg<62@{Xt3iN z^QdLgUv`EaDAd!w1KgwocpOW2^X@FN_8W=s;;j?l2PJqf{}`i(<2LcpF`3|R)WSqA zVExNKC6@@z&t$3yKj;29%Y0T`s%h*fv_p_%osW=5%ax-puQ z4EM^ol!9x%f@X)`1DP2Hwcp$r5rtq^rHEj-WT_@Uj(Hl;UV3mtFcr)(-y-0fyGzyn zsxZI=heQvU(PcF%)?u(P)?|(oZ&c`kGq)8(;Ag;^4^r&?HNx+uj2~mf6cZo0<>`k# z1Dpkh(lyN_*R{o8C6gii?h9MBBdlql{!EobgtAS#nzZzj5NwdHiw{ARMDk;927@XG zk#Mn2k^2>N(}geGz63*o4Mdq7Mou}h0#zh5NxDiQvLQQjrhF8Qn}AxqXgxV~oOU%8 zF-lAm$xrvvxzeABiRe<;GJ#uS6c>NfHNlp z^rVU;pHX@H=a$orLR37Ku8apEfXql|S{Xpv(2F-2H^c)MT3i4Tq~^M zXUT=l@KeA$Nc%~SZeC#aH5)J2YyhBg4BVhnz=)~3+25FS#1W@BJz8!i=U&pe+Cv!E zOW=^3nacdA&x?5T!kkzn*NA-yM*TIIOCDg5j+mXZr-OXxx^E&zo;pxr>sMoP&Akqi9kyS6} z)k&(V4iRn98|LRNmktbkT+#v><-$LLr~n0?_M5?JeC=74j4&ncbHz}5eS3V_DYiRd zC8F+7V#3_>O?>zeQ<#dP$Uwj|cuEC8h82LQ%UP0sW3vwCC`im>?1B~Uci^i%9rEBhb3%&g=ZzC|>z4p!aVP_( zM|m-QN$zJqySM0Ky&5Y<7IlY0vaJv(b-j70+cSl%ODjmVQp30uBt>Si<%^x(DDK~J zzx++pL)-H=uW?>#JWGBlgrKLZYpS->6etToq$dIiXUma=JWUV# zk&=7u_bDCM<*5L~0BZoj0f0YWeNZ5gi5Ho5(O=4G zQzw~|y`YQq7DM+3hhO4_2ysVE{9DO&_6Aa7kst1A&=ilkYPM#UNLya*xg7n!O^|gU zpaG$VW2)Lsr10&{JE8GoAQ~VH*B^{WD1hsoFiAf}J9Km0833E4D1SIj6ZR(a)G^A@ zp$AY|F}TAEC#OOBj~H!NJikYM(Di>gNjfT9ip! 
z?W(q4K589*U&YqEKv8SKh_*R00)k_Z2^+3p?*IIs@dR=QNIB`tUfT;t7H!2|1;5O` zc-&A^T&zqzAWm>AFQc@fqgzdXMbn918fY<(Mv-4oc)5uiMmkV{#7^> z+=v9Co$JawvF3joDswt__{OnDN%`vb`@zYy-HgKPiL-WZe_g9aDmL)KzP%iyO}X>C&(5#O}89Lg#&fY+KgOaRDsjn|3%95RhvXDo4n@CK3EOu(^1r*c%m$h+t{-i?QkaXte@a~EkcM+X zcyXh^0mvQsWol@FjEijEVyw3@@U)%{J zN(G*m5t#j9C)J6)qWMzw+E)!mE7NmaEF$XTC-*_m&9Sn@)`qHUp+?;l@9OaXB(97s z(`RR0WTAciivhxR+Cb}22!8y<>n}g=&_9CKB(OpMX%OT@@<1;y>zVCW1D%X|q|$$C z2&S(}?XCulkfx9w`S!)A#wd{te<-*dnAUeforJf1@2lX?G*Yu%qWFx$jyLUzUwE-@ z@Xx*!L(o?r*C*{@uvT_J9jHluUWen}zno*}9Ev(^puO?N8<-On5E;;CMfQZ&%Gea9 z)lzq!q@ZZdc10p)ioJks6K7zz8gjJ#X4jGg2Lj+ijAW@xumAkxiQ_cwd`;$PM{gxa z0MZUe9Fn|<8LcDR*|mE4q=EfqcOhQZWO-utis;pJ_jPcMTYCZX1dIR~$?QUL50k&^ z;ti7}!(AY&3Of=-uc$b)(JJ^io|BPt)Q^su=ODY_O(L~9=U2-`scO42^mj2A`+4RT zX2^RBZ;$jv^KlD2GGpg~qbiy>6SyD{S)aAR5Os{;L^X6&1&PTf;a~eQ?HsLjoVswQ z|5?jL`JJIgK6u1tDfn2;jCN>haq?BrwsTvj1?S+LFZ+$&tCN3ZflINhYnb$J367en zYNqUm$JRkyi;ZktuKZ6Ws6(r?LfLur`2J`^rlp;A=5%k;1%(7TH=TU^wJEU@Mm4}y z5XeAoNdbZG==3i2$@iesUnxK09Duee+ zjP>Y>rm#$3_-o~Vn*#G6Z*|a_9jwRFZC1J9neQ36o)#ZH6)?FQ0)cPE{v>72ve)Fp z&`P3w4~`ac%dJC(l%*U&ctB!{5Y!X>$Y$7|#$ZmY>5++X)7rOjlKlLnjs@PiyB`9H z(c;`K;vu7w)7KbzP!eVMi>0@;FC4cW3WX$c>01?3$d%)W)pZbI+_>c%!gau%+|*m6 z$r<(AAfI<*P5z;(oN_0@lLu1RJ@5sL=>t#kOJ5me7eth%QbL{FlBlxr@bT^ zKX|krhfFWLRi-IO)gJ4HuCBr*>h8vw<+Ek6!7VSYn{S0ejxti`f=r@DlANlYYyVOj z!WoPkv3&jEndb`7a}ThQ$jcP8ou-gi7}d@mt1K!yaG6NbtC`ReN-iQ?u+L!ZGq0a3Al(WReTozM{Dws1>LAQ0d-yWDQ_%jv%Wo-L|E z9_rC^&~1*Y>c|+rQMEH+)kYjEH3rzU^Goxru`xlFGk9k(vb8a>yZdAK+_l;sUFK~i zd)0w(d+$pVstOVYNBR|S1HMgD zTx)+Cag{Cs&}|E2{y01ERQ8wFpYSx%&YxhA#7b}J*SBq|ivWmkC zYr;sO!^suJ&Vs6wKpbJF;CGkcS4$rJ;$!GPl+pmd>TD?@W30lUFwG2G_=$Dw@8zLUB8L)0C5G1j19a5X>uVYSZU>Ev?%*=xt!Ow~-NH zr}p+oxFS}`b!98f-%X35 zlfz5V#_tbD%pULCzdG~))rHm~h2D=BxN{AZJHN3~W8wz_Zep^ikHqBPw{2H$!Ns6; z5Ut0)Ss2sGNMoFP{X`8ij;rv3|4#Y}+CU&Xa?&Rq)(9t^ zlwV{{JpLC})~h95nDPL6pD#JOFF7boO|fKYDO=u-56UukS{*#$ZS751eyvZMO4sp( z`fq!`4ni`>`kuCr&r&>StcM3+L3{%o~x&FC`Pvh)?8Zp zjQov@=f!-14$`}n>dPvb-<&6@F@ft`9YXRBM9Z|x!Bg5wvn&_K9-|L7tk~>G$HM=e z-!RWmlwDY;=$INl;QQuyVb|`b>mk;Mvr^ebZR=B4}PRlRi!QORg%pl^U&b81k@olxUtz zIZu(sh-P}Z#4;bbdFFbhxnkTrS|&r~49a&i$JBpwU=cl^j;+rLU4-@(-eDix1aU>< zaGqD|>~u59F{ACRo=@<>&lR$EX@0)$g$>G$swDFN{?p1(sC(SC{{DEQ98_0>_Eb*p K$IBfqCH)r)_ycbu3fqA9@0|VRs5+1yA&GM-g{PV&=^n=Pv z@W=Dzry%e<;^+684lppU$Dn^D~f5*m)mwh%`bLwge#7Z(?1a~n$sW5drT z%(nKXsfU6D;6qr@52=_tTA9E|IysoUQFgREOoq2Cs>H!*ZH zx3zhr0zMAI%FfKj#>~#j%ES1Em4}&?7y2_Ve-c*`_&nEtm+WM3Z1TUeLBLfpJJ>>B zf`IdwFx!|ovOr(dclh6>GJm!)#VH>$0tXQPcS76GZuaJ;W{xmoVsG9#I@+5XIsLnU z|J}g93vxF3-;F>o-^$$B#KytIfyKy;g@c)u<-Gf79R}tN%m?vzDz0h!3of1mSEPSV zrgRV4P0%se$UCfpnyMB#A7vj8%aVD@bri&`jX7g zHJbM6ZWta;+Dak&eN78_(+dVfCM`!kk{_?Z+v47NlKxD)oS7fFTGsvSqGESN|R-g_%%lmSNreJ47T&mfV!S326BDkjbp7Lg3Rxb9N0ArC=R z!_cZ@r2Y3HIQTHiEG|n9Dh{`;_f(*F)Di@^vzXb=%KV#2?}pvL|`9mZq_1{`L+B*+7O z+fi%GIA7HNv$GvF8(VZ>G}^!|&e^GWfy(J|s~83*hU|g!;Q0`Jj!^|C#B=vDsjozAeQ}xvs50@iD zR>9!8nNGNTQF%%;j3D=G)`?mQ3i1(Q?3$cdOy&~i?Mi;~!_@8r#Y$TGs*vrUW}GJ{ zdCuC}+DQ56H6AN%(=&7EgqSi`vS9@aU0ng$8NGIaODNrDtVtOTc22D$y@+y+0oy-m zE7siHa1q|DdcWX57|Ud$Nz>JNFD<+J?$Zyq`Oc7RrGg-LJc)0T#uh&JGwgV;<{JoY zNL-@9;b+j#Xaj1er?I+!LnevS!Nc6j4xTz5jMMZJx#*mz_adxsexObq3OpSgFCDz~ z5jxV=SOKr@AANwrjcDTOtxY#Tiu1*gRc6kark>`3zS8OIu5KY%C zjq8=$OGx@~T#w}1lRJn(VSiDVyE`d z7~}K!*gbxzKbUzXf=YR*XcbbsM0yAN2&+0%ZO%aEW97Z85i9p0qbdfv=#a`r+C@0; zMqelx`aXA_&XylpBGD!)6T2Be!l05SDr+a1!!ou~T>C;yZ>2;Wo6>DOKJ1A?r;yY| ztB@p=FBuP+N1e_ks2TJr<*KSZc=RYX?<|%!!Irb>D3(kow^8M{n=^e~gYFf=R)>DI z=8%Wu;-1Nr%DE~(&}PaYqJS4JN%2;YAS@w564a9jf<+(N6ju~u5fYh=zIa##Fej#` z4LAu#b{ruO6T9myl1+M%D+T=$JK>52DzsQ3SjJdL{yXdumj(Urqdd)}3sj6)@dlWm 
z3lq0j7_(#6$mg%Mlve2k-<>2Cbv#^GX^G7bNA$|h?=(%O1%VgCRwt>=Z!YI-*$lp4 zEjNP-(LGBGBSi$04C+`A?lL$EVtwahtQy*FPQm&6{&Si%C-NqwPL2RV5i2uqWoW>Z=AS)$4tx|1Jt{D1iorHu$ z^ji#n9i-gkb4}zraZs<}(uAC8Gd1-o#JzJSEOr(Oc?_CD=Ql92UW+!q5oPhq9-Uj( z&U+OR5s`o@6@(k$G+YYGt1I);$ESo|eb3%1%PB_Ukk>3&|EJpOkYkCyY|U8k2%73J zomULfQEJiiLc*m>+dQL>4`DOzj`G8~a=g_<{`ZZ~cSF`j#zq{6wBpPker7}+o+0^v z`|<^bZtRWU>$X#B`BcBzpLu24n<4Bt_RM~%soKm^RO0N`F$u@V$6*37I`_^Kzf`yX zp4}2M**V({ZER;mLhHTa7wk*gm|S)glfJkt!}fqWqWIUn9+`(OHRzP}n{XnN;EY%8!aSYZw||?STXLO|5Waf$YerqvyIeb7g14d?#v@3fi2o3_xL*b1ECR*we2e7k6J>i*+Gn zC3uAsL1}D{3my)ZL1yGl*p*ZmIVUIQ)DSujj^vWC*UiI<*C^Nr(`c0TtE z<$U>(nveRIM@zOAb9D|h;GMyVDTL$Y*5|hu2i+yQ4fYp;d>hv%Yv%`Ji86`I$+KaS z6yV=n+stOi-RWS&%#UR{Usz!NJubn-?-K8dw5mdou<2nwuXUj$FlvfwXlO)aY)uwx zGiv_sU^M6Tc@&gMVC;3|I^Nf6_T&S{;%e$j<*_|IjB=T&cd{(W;Az+#$Y`&&o;$v{ z;Hqj8Sl7KhKWwje+6xK``!t*(lz@B;_a^bBKR^Jftp)}stDTMqv)JnD>N0_`mfvvT zMt+;fNGC8NQmVgsI_V}op<5H{PhesqO0TP}MS#CFDoyHT|QiBVAQFNaz2NZ7HM4JzF9$v@kTcy*KLLQ~Pb*9YH)_Yv=3zaF;vxv2X&iE3;t6nLV|q zSYUs+M^~{W-#?Ng#Ro=qtt+zslFgA{_g87}@sbvKF1rJ6+_`%bow;=XFV(`J@bFyQ zmgcrHaCDcmn6EqMoF?LvF_g}~rr7Zuyq(fAnO3M?nn!3G zOD#vM-z*rF_8A3V@Wq!>Q!nd~tMi>PohJ8qaON`4n^UZAd=VSLZ~|_2%Lb1tX0av0 z5LDJh=u$&XM@wSK9$lgVz*~qs%xzy8c zQXkgCSi3kz&6o@!kCfZNRGvsfvA~gEstk~UwAO_N7tL}{I9q40SSp#sq>I6@{zMk# zQX3&qe|-_q^32M!_vg!FSK0#{4)^&s2hwAAjHxIo%ZxgaZqIjoi;J0HN=iypiZr6Z z`dc|Tkh|EOfIU7vefBnIkL4RKLh5V?4pT^JDGM%(UPpI#cbt^~A78T3!F;Ym$P@_yL0^_w zpll=;0SZ$}&GKxO<@xSJXrC&*!^SCgwar3bTxs5M%hSW~c&XlkyH1ODdNhTUQnij| zwe>>RQOncN*XIbl4wtVkFE5k5(MZMsM1675e`;d)*|F!8?KI}UN>{8|M<5pSRjssU z;H3T?7(}W3v61|bnJSsKi;a8^K1nhwZT_&=X6I7ZahHajr`UNvcEF+yBj!lpww^6B zT!SB*wp;5`?8<+Y>3RPl0ka70xdTqKqnZxsB12>HDYTdp7 zl(VH~ue|QDe7P;K81`oxx;ZOm8eD9_Tz?no;(k2#QH0H?BSC1eTs=JUZ#}1Y8d72yU~Fw|=~aII0ZU)j8q9baFg$ zV(T?pDYz%BFg^F#O+!)hBEo`1fdwlgv|mOGHnjR6ZUg@i@7J`T+AyjrXIYHMvP zYimCH>-Q1eknm6N02hiU5DU4-t#wD9?U-gl%+neg1g;JjID}A}l5|>rITcRQ-R@qv zzo{FVTRuX8>i0Jji=&5-9hG(3z(NSNw4#UjMS$QYI`;KFYE*eH839A?V0BQ%#0 z7Z_*Rl#{UQ>8^9cWSheE5ah@ z(9$s;lUqqiNeT14TFz}MMX5kMS1I}S(^LLwo?UNXcd!ivwCQ*NDV`kzATm~JE7F{DkKG=C(mpej z3}F57_6UVml|_861`>g$FCg`B(@BV=XA|Rec^woC@@c%S9f+8k)mF)xcVLDnG2RcA zsOx)S6A}_q%O@pS+ysQ;vOsoRL1i5;(v0P@oP3RqZE`uEEfET1c4<-ZDzd_1OTMkG zZPYVdA`}O1dFVhuGlYf4^J>AxtV(Ov#mv^??q(}fkrqp{ne4mk$tp}5zgu6C5I;YE zXKo`hF7s<*V(ZDt@9&E5(hgyEyM?Z_`o za>s_*VhrK40{G9^(Bv{A6)gUtHg&mYPpi;t%Ru4*3OR z(yA5V&DGeNj&^+Dw4RLvWzNJk3`~~3M3#@B%_?~{gPXa&zAkgzQF`_ZVfo--5X^r( zR)e>BYXf0HK`){3%5qS@eR48ESHPwG^NMH+w>6mCx&y1$OU*(gBqW;r&7q;lpiLPc z%vM3muU3_%-D$@M9A=%KCXcHDX5GW-a$f)j4YvPgjTdMKl1qeqUhhFqVA7TZ-#9rr zfi>8zEcjPn4#$(h^UstFC(N3#;PbxcE%ervNaeOpVsns8@WGMH8HqdJshnku9JYk6 zB5_#m#wP{N6M`-u{tSB6UxHUBBrNxjC;6lfZ|XmBK!(06ZAg1Q&TO5#Zhau%T3KoR z{?7Mzqwvl~qCTX5lIeVBA@K4b#?%eyAxDj8s3HS*WbG#aqpX9g*s2&7k*SCj%`!NN z>1L)=6v60}21)M)!rp@GecZOiE zT?end@P6F`DnF>5Av<;OCERH7Y2`BndqKDUYsRTkq<~(!s&{>RySlLvU);GA zi61UmqEpuf9@6pE)v#lu!`6_=c#on^i`{}FA`=t9kY#18=k7wy&CPuEogeqqilI2R z=4gI%r9E)I#fMa1e+g@E5sZuM`}Z%W+Dc5!^N!5<(<1ca`rcCr=-_2E9rb2qr-Zwu#vCF>MDC%`} z%YRzb`1b5G<;~#-D7$?bO$TarXKU!rWbvE>jrxZV9~iIAVZr>SVP=k)p4Q0D$q5b# zSp|T*5-d4@zkX;=zTp557p$QiojQ9>0$d_2Sumg2WlP?;MY}pr)|kXMG zVKm)TiEfVf!|f|#?(@2cGeD2L)1D~QOLVdnvP5aIkN_zHL-OMXB?bn@^~2Rt-RwF& zA&<4JiyB3~gt$2P5F9)jewyi)C&|`OdJd?JeD>{Mt18ONv(!uVl=7gW=f=hM6sEs? 
zG{pxfDr+ga->8eo8ryk^ibT!NPhe_lszQ&AU0s_M7>MAc-Kym~; zy_btlGb>>7hv=()nrB0+bjv}3g@xU@EKn_+bZDWu@gFf;pRaSE%*vJJ)3-XXVj@i{MpbW~{36RPlnWg-L5jU#_}d3E($ ztHB#^;WRWfi-fl%fYHFd;pB|d6Y@aE#5A5gve}I*ctyZT+xg6#tSA|a02@kw2Ky!$ zwQn5~xMhc!z(Z&s89@h3Pe(`Rv$J!ac|~5{I?{K*6#!U5sM505s`Pq#^y0Q%w95MF z-$U|`m97ULH>}jw)+WU;@bK^e7M75`L}i6wKfxIb69BK>@QTYM0h>Q+eu{Vz^O<2E zvh$gplH;Gcz{ktKy1kubv(nS}8W%SZU~~cJ{bj&`c?ATNf2kC3-ql#mj52OQ35Y~e zTL@Gv0AO575iQgwgG1~;qLM1?5uiFRfk#RjDV@8T>GPNci0X?MFMeo?S89|SK~(40 z*494A%E~%*$u#1DCA{Q)#{~cCzSI_N9ywq4-6Fw1X}nDwvvfu8x`{}{MqK;_0N0X* z6TiLgoF&5vI(>T(%+Xk5j8fD7hGtj-=QKI1!fb0%8ryE3(IpB)!LH7j0*yqQ4w-Wu9p*n}H(S08< z-Lb4Kr%F2e`x(H|$$5GRfX+_)2dFKTffP@YGw^U2Bk`k5v;u;k_N{c z3~2iv%nw8B6f{AZ4DADIz@I;FXZAIrtFy}NjK7Ogtb|4ZpU>;Ia>m%V^$*}YKLP3b z^$;5qqeQ6@$W&F&Geno*NU{tBO6e&zB2lQx- zx^rA-KP#T9#8bM?EoGW#tjh=95aV0aosT){1m!}X1X`FMI>$?t3X@vSX<)h#yxSJ$ zK3>{dE;OU)E*|#nVCG*t@A5#pQ@M@^f&P61>6=R9yIJSC;78eIckOt6xY*o&{l^Ys zES$_4@q45HTyL_JG1HTcq*$%Lp$R@U9G}yZ6=>5GpB^BSMV|J(oj?Yk z3Eh0g3b~dNmlRLQuw2T0e$e2&JiU@Lkj~Y;@4Cd2;tOPO4r;lzwmRgz%K#Rh?4^c> zI&|msU}Ku3@EgzF7t%Lz)Mkt1sQYua7Nr_>dDwX>0ktp?M~#+35(|2bD0vI zdLs*;jjFtJqQ9XGS&28h6}=E+k|*E$wX3t6t0N!T0~Xb@t%pdFH~jP|e_WP$2steF z@2(M4Oa@sB5pULd^<4E9^*s6Z7+?a<3H;;>IfSNid{|PrZTlXZ?hrFtXgi1Bl%@-K z;Se@qwAg#^Os4nA-CZ7t6{zGTMQb%cm@>UCHGdh0!BfDC9Tp>ElGpTl7MAfZ$=xN4 z(a0xP&3x~G71%GA8(Bhy%H94qj^)qbP^VYk_OS9u*<)g z`5i~voZNq2Fvg^X=rz_QM;F8JzS-(}`Q2_+*(=EpCE|~HtdB0KlK;Vw#+M|+(vJJ% z(e4+!uu$P=#7F|n*ZF#<{-T!rd>X*;1znH7BqiY~Hx$n%tUk$+1^W9Zhtnzjc7au23k6f7KJ!dXewCc z<>g&nUB6FQ)Op=i&)~7JuJ#-^sP z>r6Ts&2tAQrVQNN2|YdUahNsUt2kmc!C$WATChYot~Q92hJV;O*c0T-O7xnf{ZFx#(p!~iu^KNXwTrGzBto_Tfe}re zv>uqHY;`XOs1cA~DGaT?xUt?#8jr#1ZWM& zW5%2WV~kU8I*h>p0g(Pa8J_HS;mlsCQiC%w;32j<3g8-yB_qkm*}r{lhgU9^tP&J? zQeTp7o&4Ta7v+$VNf?9s+me$&P*8Ay`GTL97d(8o^KEpgNRnKj+2RC$Tqgx!b)!Eq zM{Jmocin5x z(moU9CbLfk`mLJj!mp}4iNlPH45(?~d9c4zVYbRudGuF!c-ZUi(%SiOHrWRI=JCXb zq4a-QfQP#rr2^OMW3t1;LpQwi*L5;bXSzosy}^-M5>uvEiHeC>I%999!ZZ)36v!U} zon1qsf-+eO%WG@Cu+Qn8`fgn(B{3Kk4JfIo%1y6GWCF>MSdTCk0U~Bs@D!Kt(1KK{_;3GJ?%*Z5Q>R|EV{KlUA08X;E zCo`zU1z;r(Hob~#$pfV-J+^Y5e00N0{~k*Kf9+{4MSPqA3VuY_o704f z3J$1tq-KjO?5s&I3frCbiL8Gho-kJRO~EWEOJva&9I8jX6NH$rLuI8J3m+L>B)cXNZ@ z$eRRUl*x|kO>s;EaDEEGlT*l%FCW%XQz_L;xA)$#XQe+O*RF6!iH?rOcc~OzacNg1 zRKa3ejo#_EY;ZovRv*^OJ4g>nWRz@)Uihh1h@txIzRacP)}+4ELG5f}yDqOAHR6)- z*4RDe;mZ0c5SF+rsmiNqT{*-ZXXv!fe*<`xZOjX+<0HscI#sa?WIhf%qf~lsH`vdf zKTqbjw)o1#&VZOu#xLWIw0RjmS82PH0c1Z7ej>8dDwb}!zEL&48k>b|%fu3K2?^L^ zNG7+ZpcAdX4v~X^iXsd^rhRcNACgb0%&#Ub@5}8G^gWM-MikiwlGuDlg>&)w9e?Jw z4`3@H$Y`W%^VrzkjG9yoI{CkLGyePM9XMi^0&UV+KPC0kaWthmd3WW}V%ngTFP(Nd zCya4-*x<>9*Q6*E?~>zsiQL_JI^Y^AD7*$p3FaJcGz7h+srhW|Nk<}OHi0@SdYbE`mW>G+gac>dC3){ zREt%ETS*_$zJU33x>wo);gu{<8HEBdZrgyIoQvUKeXqbMO<258Xlr<}*xAGcjLTu` z+sYkr;js1M3|r-$TxmD&$3J$YS)+3bzdqgi%YOt)i~th=Y#k6`%#D9egBU1f=@Gdwt=kzjIyyQC zOW?ZZ)l7q&#wgBtYF+U>0D(9?(f`0+zWK+0luZB4G?#SHP&_DBv<80{4m6%{%* zwts7sJ&KZ}BPU>=5;-FpZ+_#pmx`29ni;-!Ih_Acvot~>1$#`V{tS0Y)D1o5wMt>S zV4=t9`Ugk;bpM=QO(4t^R+t?=>xv}qZuLcQI3NN5KEKSv5C@2cmRs-TfL5(AE#%?k z37E0!L*eK{@p=A8@!;}1U%EZ*NycO$_j6jVUsHb;2Gi|L`hkESw(*Q{`EmA)ros7e z9W2tFO96LIzSh%HO*WP`jAvhdJyB^8ga-imLX$TojSAR*Jv7coi-Qw|>dD+*DFwyA zYqDCXkEgU=9O^Jpla+l1h1wr8g{SA{o<~JRy-gg_bp`zdjBLJ5Q+>gqfVu)yE!(b@ z!n$CD>?n+0)J9oEXWQUJtlvloxpu5P&=ediX-X%`c`%utx8prN?7i5azg^bOQpt-X zb-uwdjR=g1i}KTtXTP5M8HH)s5+xW?m-?8IY}9N~ML^NzshFk$D)(tHkm5jBGh1qI z$`$LajAu~Cz{Qnnd3=zy2Hx2l@YvXlyI4(jv5+MbdH7v+(ro)Uv9NH5bvBx6gg0V$ z#CAA}7+?^tNY)BR9L&dZ@x>%tp8wfEo^m@X{)m5Lt{>n%&!F7>=-wvkol^JAm(0zf z4t~($_ouOpkqH3T&3?`qo4PZd9Z(oPm> 
zwl^F#Th4ywB`5#Z(IF0VxY%U5ySKj|2I#Auon1#qheGXVLmD(Z!qItfco+eiG0-n~d3l$2E%es@<)zR_S2s5yDTp5XJ6DB3OHwJ(;hVT( z08y$6(_wp1=A`3j-&I*m(5ld0O%doKHHjB-lN>26V%ZPfRmnAPczzhCTv^o-SXh^R5tqrz;2oM7DcZEi^X_Iv?{iC6z5rb9o-9ER^@nAImiv?^YU`B&gLSXnemRo%x z73#6n@#uJT?QX0?|%OK2JhpElJYb{-Mz7 zMCy4;hS`!7R9adpF=Wf6=aEB}PinI^rrBQM73S*^0n5sIY)!yf7_1(gt@4ZBBV!Xg zU0%w-;LX}I5$aVvKd7Y+ZIb|Un@6*wkVZZv0V$sCQ_pMn%iUlw&w$Zx3=EK=^s4>U zeh?-YFH}eK@$pffYrHwTcHQqL67f!3QhS0}_&B%J0aG;BdajxgNy`u5gfs7?Zh*%C zbU+4X(nsMNKcG>g10VC+ZVjK?5f8Yj;<@ZkTZUk?c1ClNDI_EQOiaXQCO!_3`rNk$ ztx#OM%?#5S0Sm2baWUiR;m9YideO`M4A>Glj2eGH2BJW-kQxR^n?t9=pTJQNQBgOW zt`>8ohDm^>1=Y#E`F@Q_%w?rp z6UmUp1Ck+ejLK<}rIUGawkKHS4^4NpG-#P?hsAes%-Y`|)&vS&ED!Y%xM-je4jrsF z7&`G6gDMZAk!`zHIt^cYj$c9n#jnP=lg&y#le5ji+WPv_hy5eDUJ0=Lwig;|B(5X` z-A*Y%C0Oas^yajL`nOANUV)-n-;KK?%p;z>FRxw^UVc*hX|qrtcS7e2Xq?B@LGI@j z8W%3$hem*TUv&^31Dvy zU}1DEJoJ*Qf%)-Etq|#L)9r=pS~!pFunFYr-WXVxpIuzG8jf1pySm`t-+1G{f)79= zlm~|LjmZWJFCUdpD+&SUOJHJZ)mRMfADMFO?R{$q*nA+PsHmKAc<01*4u5p@_${)T zD2*JE-}(4e;Wr+;t9(smH9{q70mJ2EFGtHyr2i^ z9c{*mm}!~YAPE3>U6b;I#d|BB9Cqlj5TCc*PfZ!^B2>|_>lj&O{4|m2%fnti_IE=E zfegAlfCc}97ZB&j8Drq&*3{Gk<(b!MH*UrOyt588IY=UcgBCau zFit*!5CffsuaI{OaMPge1X#$_K*$B6Y3d`i@&jdhC{rZUk}c$Z<)}%@ZEaBfX6>(N zgukuP%B}gd!1T#ZnPy&x>0+t==t=szALJ~$lc>4kYr6~g^3Fl@#u?mYdKk6v*NMB$ zli|S&BIKokJc?Gwqb1awlit+>{579X{YM_jO-AH*r6q<}H}Ut?R|xm?H>`%^HS-Oz zRr=E6i!u0&WRGVnQl!vz*%e7R>^Drad~`J2&(bo0@Rd}d-my;xX&4C&?PHa*IuKE- zjiqH}KLIVX@n$m>$h4++yu;F#t!HbO1332{f|eInqBsC_>FevKa9LgwDdrvJjF8&h zU)yzgu}2Jx+!29ksOPqUE&THSi&AckZC#7w4h9U+M@_*o3rJ{ko}~?2wsjzO>|xTb zlXnUfPees78eZ25=XJBx{OpJRGD~xYsM!!WB&5Bv{K>Z7EjudIxH`1FtJ|U$+c}Zo z;#X3L6+Mr7+Su5JnYd1Lj0NGQ>m48dZ&u&rFo6#r^bmcQwY|B;Kq=*Xb?YAtyU zmklqBw*klKBy9pR+$m0TWRLEAqZZKxIk2*_a=kmK$(8keJ)=51g`i!PJ~Nx%n5|YQ z7aTKU_6ejgD!p=;M24Q&+Fvm6ya2%lC{yG#kJ$y;!e>o-nca#)cAJi;8^ifimmsnR z%|&Mld;NAS>aKXMIm0bgtj6(q`7JyuQ4lyB$d!S<9g*pM9}L8K#R8?%SFJz|VXBVu zTKVQL=!qq|mV97lT)Ji6JbwE^@NK2UEzn4J`= zkO&CuEk@};^$Pd%J#)asYpmzG_DV%GIe#h8n$1?y`6J`XJIN{8G}x`dL!IJN_1Llf zwVj<{Q1F57mDg|NwsCnd2Nte!m4zyx^-8_&pbcvQvwefb5f>L%M^~2*Aa=>MSm{Bn z;slYahvaM@t)oN`zyOJoVg5K;#cyC;oPyQGy8ECA$nU~F9*|qoguPOLr*A?xS;Px2 z7w{ZT&s@WjbP6G^y~)0D$Z*?yd(XO4lBT9{?v$xb!f#P*eH`1g-rn}Do2ydH`@5+R zmo@_8kqJ2(^*5HgJjGuvSJtUu^#^XgsLyF^Rv23Yi)Jj3`smZie`aFi+Gq+8-HYUn zM9JNKQ4zH7uyUW-D^D@&7U(tcFD&i`w6ur-US@n@TL|?Zj@$nThOU(ZpqisXzXMVW zDP9DeR)0WfV0$Kq#upxqoQ@6=Bxou+DDpjz9}yfq3JJl7p=9gkY!ed~w|i>I)4IOn zx9yU!sF>J|MW6Z%zZ4+$8r4?##Ir8ntsAQ6YrTDeHxmwuLH_n!owBLLREZo2=O}@( zZfkGXyx5}$ZV0{*q_SY?n?JIdLJKT_VUX+N6{wRAtknn-zCMupFb6r55^9ZvY9R4b z;+u{CLKKe*D^PRN zIo!!fuRb|I|E4piCJ^KqU*B(ZT9sziHD}3e4)B0_nyht&Tg}&^$4g6qHTLqJHLBGY zKJ|iF(3KSo*2fcXxm4lsNa5R_akU&tln7#xE&$V#*o@)f#ZDQ00k!~+c;!))Pc=zj zgos+H7-ne}@B(8Da#vspiu09J(gBCD*n3l_r>s!D(5Wyt9*s#v%g@S2-GpoTxo{q%jy7p!;x!kI&CL#>U37*ufgJF>- zf#^5tXnI)h0gZFS3;hB^=lS2*(uy@7zRV1a8S4*{hJ)^O=|{AFxY>gEQ#(1AZ0Ymb zuC=#*Gd4o^x;|E;j2rt+VB~bQH>ng_1_WT0lVe8NwM{3yTRRrdJ|Vj4J^h?#bMlV^ z`@J%}&c~bpS*~oGMMmm>sK0RYRZG|F-62g^RW6*kXFWo|c;AO>@)3CTZG9l8L?Y<= zC#l@{)DdO&tlo8n-0Svy1#s)%jgZvNBHnr)HYVwFq>JEo3vIMRHx(mBd6%uQOU zB!$+hjWhWuwwJy0EFW$Bbu%oV#4YoEcF!)aIg&_#n+}5b;?7PF`aChK7$+BW#ybj( zq99_%_o*Vcos~|znh~%rS;dP7O|UNBU)|tN02KBb9liDMAH(sV=~syk)IJZYd57dh zI(6(IO|ddsXX%ES^!iUHhq|1PkB%A@G@yb%W@_-~?NO_j9ZT9(uIF!2Xs4rBDKP*P zlH3Uz%Ga*3NkA1_{|#DAx%n71AWmj#)M7#Nj=S$q)l>o8%>(Jpol7pfS80N77+*U4 z{B$A@+x%?}|k%=jiAda3>@D09WH)aG{fp12+Zkg4c!J zt>tewqW3#;XkuLr1Y9=?;U0JJ*^FM4&$@z{qRk)Y7!TSfFgeOi`f%Lq#$t&uM zk&^C#FmPBFaG%89;E%cZb- zx^DJCci7l0H4moGxv)2`TcaR#5jD~(1i+)lHoVj~0RF@NOa(&AsduH#0{d|48EIGVHjZhyYO7TtLfL(`g>A|H<>)4juPEM6|$)x-)QcRJL4 
[... GIT binary patch data for the preceding image omitted ...]
literal 0
HcmV?d00001

diff --git a/images/Geo-Rep-LAN.png b/images/Geo-Rep-LAN.png
new file mode 100644
index 0000000000000000000000000000000000000000..a74f6dbb50a960593c8351df3272c0c126600592
GIT binary patch
literal 163417
[... GIT binary patch data omitted ...]
z`@-k{_#FUyTF>l{(f9(`U%gUHU{4v&Ikdcl_zQHqjw+{M%0tT)-Zt|@@4|WS+{q0qU}NZtG*Dwg#XL!)G}~9uyu4ZEZ~c203C_zq)8m#FG9*Q-%(K3*#yE9vn9U$NehMFgB*8ls+TJ5L6u{cU;zyjXLE}fFOZsp#XIz^^QsT2{r8H5#4 z6i}_4XjD(M8^$1cQPZWBj4ndtVc;kML#kS2YS#F$5@3pf3jyBOBi~rz#cbU6EcdO( z-k3VP=~_lQ$z}1#WzqOm2@(}&VAj-0{4H{Cb*H9(Y7Cj-^2w)dL6R%@TEwpyZS)#rga!#zmm+o zZt_<5PysG#l=1?H|3eNn%AM55^IW+q$mKrNC)A4t&r$b;3f#;*?Ox$F~YG z*l)r6cSV98@tG= zR*TR!dJHSNRN^TTJCQe9i`qFk?)gDv#Uk4mNm!uqfK|`y1{V|L4nYmRMWk+&yHKx0 zY8j|EQxJC68PLhbT`abgNNq)ZmLic2>yavK6_DYY2mV#7W?@E`^*GQy3(CASg)U1!7qj5ewL8vb-tIi*@<= zfAufkNw6oV7r;JQTy*toU;PhQ5lFQNMiVHa04D?91|ETUXbfHEkkL>8`v&_gQsn{m zHQM33o<;L%ZU}jabEg0&IHolgD7Qmm1*Hg+@c&wkqt`UvyZXVQ(+o1k!O2wh8!BKr zXedt=W@b@8HvQ=h?;nTDV1b3E5OfPX5Hf^8m0#so|`SW>U) z0}wuhZDrj$$B7m7n~eyedBPz{ox-8|Z?_0)+qmbINt_z=ivLFzpkmZw;>9t;f#+cfek zleC?)nEmkf`E3=tP1|9gl6h;UT2j)AM`!;3mMI_T2$VuZY{F$F&l%YDKzK6riN{z8kMim14bVm;zcHK zCOG)x#Zy-@=S<{oy63U*;2$)ZQOpBnF67UTT$K%8w~p3|^bg$=SQXe7@0q=@gs%4M ztMz_VNEXS4AmLfBpM|nC41A>K;b?>D+$$rSu-vh|BIW|K3}6r7uh)$?AXlhQFBzGo zu6L}@5FBBijSsN@!$1Bb4)%m$7ykH7{=Me1{_W8C`LoJ!;*Og3r6L%t69#e-$Z_JV zIy`qW8{^mwpkczFaQ2t^PA0Syww8qY6@lKwXgZEI-BSWSqi4RdN zD5dDq1TvW9+QaW&&z^xjek!GBa`_OZjo(N)Nmy`Dd1Fl{kr{!SX35wLRy{;@;MXEf zM732AMVC#LVl9Zr+I4eCH-2?&7n0s^njuqz`x$!++&jdB0q20*8i}J_ZbLN}?`Pl8 ztGHnrZnMuK7KvC^w5AkLWekAaPZhMw# z1lo~k20H-V>ybil-8#l99yp1%s_fSdV>j)~*ix{!P<9xy@W3OAe6Z1Pd)AQ>bZ1}# zt{UZ{qwtJs&!zytwPyIx$oR9i$jyd@b<7)P4`F-W+6!z+VQ|kLOfWoD$M$xER8gxo zR_R?Rrbp%=Y$@tLE`^OUTqPQOZE&SyeEp7N;<+vxCKfCLES2YE{#r8_lqnceH&86s zse<;&SS$@4z1vnVycd+WQ9(7uI)`ACz?Pz}E85}PzSS3oH(wmz0epg!K)D@KAhB=2 zJc2C+lm&7Zy`fajAIxNPQ|a6t^B{y(8gzqAX0kJ|3!&Z$a1P(2Nb0}lT)sbg{Nlhm zs1DRnkUXey3MC969-#XP2yZYWUa9ICn}OpBGPujXgng-2HB_&fB87Fh^%|5uA>R{q z8J;%V`Cm8(1_5{MANXSMHef15zVRDM1a6HLGDodG0FDCX>BWix zbRi?Kv`(saQUUzz*^?IQY`tyn=I}n0P%lyD=P;}-H1;X$);7y5&b<(-NX0W74N7aN z#87J(MBN?0{*q`24jdK$EW3MCd)o7yHHk3KUY%*^n>`j7)G&lmmUnRQLIk zc5wQf=Gwe<(x|at?l+Zbhj20Ib}`P>8>f*6i>4IUJk!eKlr@o4pXBN+6Rn<=M*C8| zV;ObfFeIN7AtAs~uW^=iNld#3IJbeLSfkX677jHUAM8H3?6^BoZijoKR5gT^tUrRv zUe0uqYqd|-8Ut5lF05zd;8aN6SNp6j27i-=l#}i2J3COvg??l-3QbqwJ@(c_DoXv$ zpzaDsEDP;iD!mKr2A|iukrfOPK-hy+@(bA}^Z_EOZ3L+(kC%cDsCJy^u6q`*qNy`5 z&pHRq>S9y}$p&CKHmP7<7^E|6xR0Gj@!OUF*ahm=fIL-f0bufVWT8W#qun|4VD1P< zlk8?*9NEF~slf&saww;$mJbz2QqMJ=A4e0-tfIno;5_-f6&#f3YL6XxvyG;eddq69 zd9m5CTxXuD)(xTbz;K>326M=fDVIY~!P^4D6D>iNLq&4mb7DuG##7p*yzd^xl|s4( zYEr928b~ZO+9#^5WX_}31>7Cw6>G;|8{f0Va-L-Fg+U}Dp_6w2%Lai1Y_b#=z~z7{ zb+5V~{p=GNn4#zh3x(Hj1^c%ai!<`-ha2okFpGgPZW4~){0W3Jf%mv!7y=8PJ@skj zpiY(nK|V~$YzAUGbPy`qVSYK%Y^mOcOnEX6Ft`tftPzg-Y>>;#kDPCL_xBC#TNv2G zmSVk+ksM2Kg+@v}0hFy8P?QC7uFkRmewFvRsnae1eP3{IIl?)6GIwWWvq&{qr1GKi z3U7mLN+K=#DayPv47Y2Xa6LgaYv)%>?)O3s4ISJOiMu2lA%!UI7=$aT_1?RICG}|z z2XJ$ccB7D5eF%L1CuKK3Q&=K=mE$l2+RsqIgxk8WaEn>>*84 zEz;LW-S7?YaT1IJwSe?{?IfI{X49->Gv|Z#vzddXPL+F1XdI>LMe;%9lh^47>*Ur} zrAarpx`o`vEb_?EiIt%J3D}dDPhBYC*+ZAbwY}3MSJi9H)dqLD-j6-48bZI|=hTxR z$*);w^oKcbBB$DmlYF6ewAL|O??D5UVGP#P!;az%57%3!F>m-f&HoA<3-Uih6JG%0+$N=R}?w8zk`us*JwJ3x_@w6hw2# zZf13l!;oUcdE!t=T52#1qXfN4YJ6ZHqy#1puWLTbeSvf(V|5$)kiA5&-K|ZiyWw-uabMN zStcHiZN2VaY1R*av)hOYY^bc@%Kf#(b3L$uH>1JfAd=Bv`tzdDAjjBG#5E zttga4nIo8C1k4cWza$=j%uf5@#Pj2G&2E@!UZ}LfnnuXH&Atgh0>YJ`?z0aPJ^rzi z1R_$XNrjBeRhi|o+=(lPng{3*6GFNgxbs%u>~UVNX>(2f2Bm*sx}PnAMSu;rh=+uJQz-dLfCx~I0Qd~5+Jk} z!%_ipa+uBy@13KKEL45KbKu94;;#-2&nQ@K2GYZK{44jSckc`>Llp0-+zIWVVkPcT zKO`yYjbK&=0qtlM(LcQec=nv9U-QjEK>{31Eo5yJo?=)b=xw7JLi7>UDc3x+uZ(Zs z9a@FygCZ7K!4NVchRTTA{jpk=w?<+?y{}|$m1ZN7dchfh?TLM-N<9j)3$25`A@%~K zNdp|o4UXZLM;2cifr>D4Gu@?fSFw~zS5fdvqQyFYnZ#7lukH*?%_eeSy}JApO)Aq> 
zsjk}MgW%G05;v|mRr{y|C`U%r=CzTf7yUCZ5w7%Gk-dQ+4pQ^5<-yUf(+xp*q_SHk z9-dlDLRU76-tPc=%DvC!3N$`X&M$Piy-47{t#ki(%kbyFogf*@8!k9i!y=BK6`nBZ zk|!fiSX@x8=djJ%(+QdA0tugwzUMpFT_9YM<-iA6kOE~dDC&F%&z`d#f44LC9T=C3 zo;`6Jne1nN_tPy+&9uM8qgaW^ltqFb>Nr-TY1#B#WxujUg`Vp}4bJ6C>pbebA#jVR ze1m4{75}a$l^cfg`O?W%y$|wv*r1!-P==X&Rx}729Euv{V%v4Y0Cs$oZflm-%e$my z5*yGL%p3`e0WzVuryPV`dvA8%4RSl36YZrM?DK#YG`}fQOnp-{RxF=F7>gQGi#_dn z|5z+LjTGk1g9ar809>$}8|`SkOU2pX9vW=u(=&l!Fx(+k1QhFr3ZbMy%P?W=yuTn1wAw%)w7trmLzmo>cBAEdi_am1gTUG+2CH*+DO&=l6(jrJX|Bp_N}2c)mhG#$(b{`X3q*t0XTC1z0Z3^ zHqIK&p@-fGga=A{4eGVyNn|y}bL~b)sYqJ_?GVi%B#F>Q72-K$Mi?g{5YoS!4CHd& zbndnv-E{m{bp9F-1mL0d5xOesq6D-9!rCJ4W(TrFDnEv+30i+O`*AIX8O1k*+v z!WX{q{U+$;!JaLer@;PeU;R3#@nEI_?_p)2uo4!BdUEcb&D$ik)}t^HS`$(#Pb5is z?D$egc`tgxI?>ne@?#aEQ}B0T&0SJ>U=F`Beb9h@0DVSiMD&f%`%a<&d)&$|dximu zwHgO7wFX`6)DumXMc~b4k?XS9SfVsuRqBeQ+H$qC&WL(Sq^bpwoPx3w$X!7)x>e!m z@Qlxf(@6JoABUcI`Rk0(m2|`G0v2ACI;*ul0%MJ%Ol86-mKvVx)08S~O}5cG%hkJf2jXwME=%pO zFm4Ra-tx}0AO)sRbJsGUS=jTNx|u{?IrY;gt4UK-y#Lf%eXS>#<)# zo)Pp$R5)RKoz{L;tSXZl8xR-N4HSvYut=JnQ`h}VjgGNWiLOaykk77QLxj+nXl&-X z$yP4clxpk{Ni5Sq6C;57V`Q>Wd`o79wNxcFTo0&A%puoa-DnU=^4V8E0FS_+VgiQW-0Q;w4p8x0f2=)x`ziY7nb{u}8&i;oG z?B8O{F&ZC<_mqv42YYCcaj<8qWE7(W1&w?f-NH(POj9Z{l*rw6j@fD(z@AiTfksub zK)Y$GWA>J76>VOD0d!Oy zkQP%T(bx`;rr`htBW-$TJ(6#Aph}7S#8-!QdKN>k3~j=thCVKs z1K=o0k?I1})ct?q8DgbR%E|V5ot!#H_wHv;X@138d?Q+zmGB)(zJYqzDvt zk(FH1Yb)*6YSxc%w2@TqmJRumFhs0y)v7G$oi;*}MDjuS@mJLYFN~~pEgW^uA3|d9 zs@nCe%2{OeT|xenJ^*!Il6KWtOBI^_*#$UPAWj|)&0^au5|KPUrWAZ6u2HRH{Hoqj ztTJD7k9IHYJ)YjX=AaG;*w~8|Zfu@FDD(}%WkSazaoL4#&ZE~Y#VPh(iMx4XBd;F$%1dW3##F`f4 z;C1^%t6>5e-AM96(}ilKy-H=QRvSvC1R-DrihJ#sI;`cQfqFQ*N;hoYJ6e}wbbd8S ztb_OD{QeLOM&nhPsSb&+T3?9>ebwwudJob;n~nAs6Ubx>h?iE&;H|-lHai&OK?KsS zxu%+p-qJpGd7tD>pWkrYT4QcJdZ@PlKG7`t*a91U@9UT;JdV#hFeTSjrsvPl+02lu|Tl) zD!#%HfUkc=LJSRjdL%jGQ!PrSPNJR?EtnFi0RVYiVZM$?>Z+D^2cq~Wbn0_{rIJ5YYjmR z>oVxXU(80J*Du-x+_M+(kC&SZ`gi#w2oAmA**_rzSX$rP`t%n|?|ulu{^C4y z&mK!G5B7-J{~o{|uT$azmLnEol1lIP;0Ug)wX2Pwb4z4)M5d86{+!Hc~N6*mK$n7kuu#?5C#Ytlt}`xC(6V~rXH*}ddid*q|gHznRDnO`oz6}3NHi% zq4#&o;Dq+yR5mjZir%z(O8ZTh`Yhk*-hF1el9-bbii!0<% z?Y?cL#XJX0sTA3OscwW0lRwOqS%QW>mgMMQjyfTJpd;#+6m%22VUf3}pg8}?- z0*lp_OJa3}Lf@peqYqqhj}e?SvK^ryUMjOU8~ryOqrkGNZe^Q9BOMyu%Mg{$${c9z z;~xI}=fB^WJ=;m%QfFTP`^#VZ>NDhtQiv}B0~w9)(B&4O*YAUx}>AGQ6_)v^De|9$1s?{$Izg* z_PBf~g<|>ED6&Vg9xa%oWNJAW!76&}9>&_J>etmPEDdrS=A;ewiqu@wuWMAAo7Gl| zFG{sd8grA{S|!z8dMqjKR$O}65AyK3+6Yc^==cS{I&f+m3vjm|w_?<;zB6`gXlgp!U=kDQ&a=CZSixM_9u zp>G&oOG&pH3=NDevbVHuTUF*7slHsKzS^%W>XTpU5><)i z9Ye!`!;_U{W-%1;tZ(#;3^ys2m2x%s04P>As$s%gamm0aRExEkgeI{TH%y}hW`_+E za5x>4r_j$#vovvk8b0+-{e+EQa4ycL*lG!Or`H$~Nb>IVZu-lK!L zbbKNamrg@%-cc)cwiw4SP?U?<>s_b@8HS=vbPTo|16tDkr9v{o(yH=!!DYffoY+0MZ$ZAH0aiPHHOSx! ziMOjb>b8rW)RtR-_^={1tzpcKUgK4c0vY9~>w1TjHa zQOI_D)gBl)49#Y8lc`kyy{d|beYa#<)70F4CQBw1FREdJ-(UE` z7rwukJyYX;`#Sqem;b{vD7_=!9+^QX6^mw~p>*sdlbky^a4)TOIsG?fhUyM^#bYT- zRPm&tTiM#Dx+d1%lIh#~RV_X8<{o)XhX{V^HMJe(gOwdJya&R+o~5mc?Zb`pD2VHg zNMhpfR5~=?-mhrt>Tm3lHg;+nx(v;|=7t^|R$SZDSKZas&@XP2XyDN?vb(eJzmzi9EqT;Q#3J43W$9gyrH>I*DlrFkZIb0LtUbFsT$8)M2hBq zd0m&Jwp-F7)!sF@YPzMhosw6KcFn}h@b2DfB(#@By}; zlmSegSaZ-N1Qa2t%(J54(48eyyV1nVNl-O5b4zY0f6!b0sIT&oxT#xyQ=-2i);0Gi z=(i}0Eg}^rwx&zmjGv>kUf0?hBwBFVHxvfN=7scgP>k!y{>cO99pFsOzAa=;+9NZ|G2Ndv%eYVqcYyqnp;%* z2Bo%Hsd+Hyb8qf!BuR(yFqK*hhuj-$_dV{r7L@a;A)DWfXd-$nbG7X~vI4x(r!15LZq@?;;CE&A++(z&UV3ZHARuuLK9? 
zsg`?M)I;qGf1B9Z(51(1S<_D@euK^i>aJF0$JrKa~cjsg$o7+ui zR%402fj~`%s=V6-a{;BQW!?5Vu?H?nt=NH5UX-fWN-VGj&<+(%wm=?0#-l+oR3{yP zjzycye$P50A75AuQ!kRqvvBY5bSvzBr5()Z)qZ1fkG@$u*s2|X?cS)c!kK{w0*Vm@ z1@Hu#b=F#~rAle8Qd&U=Hpm4W%n2$v^bLo|I+P1cROKGo%u1UWcwO;U>aXu~R zG$FNPTu0X&^aK)_@S|?(y>K?rq!iw5n|=TpHZ}bcY-`H#$*H3wI56-&c@n9hl1pl! zNtB7uNTIkhiL}|3;JIpa>~)8`U2SPrIUA8J)aR(~L#@2`mVON1+opCj$;o?Z(++?- zh4TQD3)~@!v5OxYDm(S)4W^!0Sc)fB(#e^4Ogu4tOXEgmBV@JEyrTwL$qF~l9F6*raxoy{bo7Dpia0^AqlJ-<~St}l!YP*at=msUjbJO9(aw;H+P#mQLvPxKUeeef7(J@NvGKLtmE9AFnUgykOKk>YL#sPtm+uX&?lq=0G?v(o zgr|>>-J5%lTmkgjdBf~`)8KhvbUifNA6uRGw8l5}w);ja23~n!vfVd19@^X;4&Ob! z{gKtJ@Q)5Gt}LCL?Z;xLnanBVjzeM3`j%pB5D=K>#eV9OfPGUbQ-pS z`*z=(HgE61SpVSY$oAp#SrqgdOdqt-TM9=#E1UAbSg&XJfyMK(+Wex@_<~aRyh3|N zrpDt-TFVPs^Bs-ld7bS=gX6B&`i9lp=^pKMk4ikFrqP*+?fuL8QaPbXI+!O{Ia zk#Fdp$$3j+TQj^ zrZ$NUqeWsx0evgdoO>)#uezojYDELtF8vLOxo>d7xxBS}7T%7h=g*?X+12NDc9`Hz zGVoi5s~t*|)PiM%03{qP7$R*zIH|E!q=S`==JCy_a}#SX-4~TUlHs4QGmP#X!Fz#` zy`9PUH}_r!Ty(bPADbWq+Hpt5^6$TwsX|3&1l|7as8UC8q)}qig zOJt8co`H?c)ll#>%gR3rHYBBzrb$Nx)m(8XgNK8$P%x9&ibWiY3*BBnOn-b4;=Gle z#)ck8OTYVu%+m_bO<{&32a2>!Vu7JU1yB-X89N{lQg&a}+-vG`3`3W2JC)earPq=P z!`$NYYCF6>=zZ1onW{R}(38gr*FKb0-S5Xdw8Q%8)1VR(M{DZ0KvcB3&(YXrt-P;p ze5`#9D#al7g@KM@n9}Fvrn@pjQ#Uzy zNPPfXiA#?kQr@Ac6R96~1G@Qzi(0xB*JU_2Dr$gJTGb=Rlj|x=LysK$s%~<2{^$(5H{5z;%kjt~?%^+g z@hk5P*z?t00DE+`y#V%!R5pSUC&C5|B%)}ti5q$?6k7@#ism2+dlW)4EO{%DaN_VJbQp{u#&bK7EqbaSz`Arb{A#r3n;+IbvDScoQ< z(7830TVtoa7*5Qdh3CSNogC7~s1OCxc_?#+TzeX|%@Ek3_N(N$F)ef|lmbwpZz)Py z0Me_m)JiP9j`+qrcV>8 zqiFm%mB#L~9ZTV2;VR%HSK}ERb}1ZRizn9;DH_S_Vk|WmNufXETr4>g!LP{SVlAAd zmQLcUCy~Qw5=T2rCeG9G<8}AI;835}2ROIA%Dr9mgc&5vQ7^ z?~13ElIewHVm1=NG#){R5e_)wruel;0m&rigQu&R++s2_7sxqZOTHbAU-0>O$J@7c1phX( z$}dLR-}0Q=TM5QDXVAIC%|+)v5B6xXfJ7ReyF+i~e;n*lT#ms>0nQ4%UF7~-j02v) z|855Rr!FE{G;E_RfPG_Y8~5x<$pD2Lls8TS7^}gkXMMZJ9e7b~LlhGF4b>f5Y}PMG z+%JnAFZP*Vm04gY-&PwzN4Ls!*W{)bbe`uGuA5@(U4>mVIBB0*U%(zsMK{@laAabA zM?Ey(1D$qNpsnB2)@{Eja@`WUuk~5mdQ8{)OgF@)>tYic!{3y<8oQ0xr0y4W!!K$4 zFDe`l&0f>g;`r79uVUYcWk+{T0NBu_xUF%vN-d2d-7U4_j@o`xW`YQRd!P1srR_z9 z?XJuSO0`|8ebMB6$>Kp?H+*6^JcjYb={*=C$#aM(V>=DT%@b?l!8zbM;2b79ROH~Z zfknF|HN2>?UKeX{9C(?x)#g^Q1~3fHe85`PKS4~d5YIWd?n#F)#8({vn#Pgzj;Fiepg2D`^4)pbH znHkMz#%E*loGvTE7AA z-(88}R=@UIx8hZ`?T%D`U8Kddz-(&oH{F&xUsQQtRr~L$haTtyiot3B(hlGTQvvG- zIBlMu@AD3W{v<;|Vz@4|;56>4Z0&vOJ7V4I3hQe!;~h+bM14zRzNNKdR&U6y?Gn?A z2B&gz&b7L>c6N3ciK0En_E~gjbx$?Ca8EPP{78ETH;>xY*r#lfsb0|PU(p)wiZ#zm z^mnD!wmv<1`C>I#(mPt)i$>SWdIxw}*V6h%IF8~hfIYGu3G9(xntq2od)f=$0`?&8 z;n_d?AD;PBAQVZJkm3t|NI01aMdIg?RO~nw-GgVA%aqF^A0^A!J?r37_+ z78Vn3EIh(n1yt5*N(FIm87|;)%l~@RBk`@rENNLyM*Y z_y~@P=Mc!mz$+x|%+lg;sDlJ0MBsItMkJ9AM-p(qLotYzpn!z#37ibR3rB$T4W7WH zzz;+w>PaGVhT}s%B9+@tW{|XkQ>CwFvIMB0z;oj8sa%@%eMXTpw9BBXgW4Z-z(JON zGy=S*VmwkcOT)rI@E$*59}|nOBSeJrK_`GRA)F+>9n*4vy8J919Hp-KE{p@#JsP6C zXR*wAGzlG~vv~3(o&;NfbH)$Hyx}L~BXmGEfq0Gq_6g|@p#68USq$Q%(m6DV$JgOv z@d{cpGjo|x7W+jGosMxeam7)fj87q30`JER&@tJip{tXjuj5mJN!UXvM?QD=I2Zxa zha)j0e&XP#*c@5nC%zX~4fTikUHJLSH?*P{4aw z)su&jsk79?S!(n!F@2g_JjpE{rI(J#@SQ!2%!FdFhDXmL+Q6eBZzmYWad zX3laG2hsWCD4YsxbmvLzgsH=L7E56MEDjSKy`d-0l4B?F=@25s>A6sP8Lu3KVG}O| z)AK<%7_sSKbS4s?3dM1f)93KJ;Mia*rc65c*{7)#7U@nnw;s;ThO#54@yYW9>~*+p zlP3{;d_EYPK8cJTo=%-c;lhobhR4q$li|czFgz9v4Qy@9hk~Fg_u~MJa(D{XPL&uLRag#lhpKaVj-Bt<5Cd8f(#tL$%7!= zHq66ZC^>o%nhmBVj&L2*3s7M>&n+BhmyR=A!OVU%i;Hy}r`>EX9)~Y9e;&cFo;-<7 zohETna3+hP%=~!@)4qC^Tsn@-9ff92!*Ds~Fjt}MLL`R{hnRmjOKXwnc_sr{As`WY z|7^mog3}VFpSFs(ou%=q@w3oOFaj5G`8>W7q~nhs&|Neg&5WJLXXE(E$&tfzIAd#( z_+AX@aai80ODZgQmT~gM&wuIr^z7eKCQBXtD-*eL9%Qgi>kh%8Kh3 
ziigj^a>o+j;m;B@YXBGwIK+_D4W?7LrGl9l26Cn%jKKyc033z20+RtC6h#a71a!F> z-xH02_`#aQ5~YgsOolpa!s%yf{PaPlmx95Lli~lw2NFX-bkJrE$@zE`Nwzp7l)WRV z6d)5P1Dl-<2IRQc)7kth%y`LvQ3(Ao_=y4A(DDmJJCYRQNz64;<$?FWB(h<1Svsps z8mAG;r14A7Q%Q1pVKQg1gs83*3#l-0CviF*<&p$PlpsOrm5`jET2#tnNj(`P^g-?} z0$q_*0_`PeCg~^Oob#v6rjZXE_Tms;hfR|t%PAboVnCG_Q%!^YG?;9-BQb=Dg~Tj6 z7pLj8fyw~gqg?7FmpIL0q=H%cnWTcrlvC-tpq7SRVtfR92ODJWaGME(AC`Xwb`}gQ zQEnvhL2tuvNvCjuKy2WEG^IJZIyeY!4?b|C)3-2V5wR;eJz)^hf^)-y2Xzn2Ba#Kq zL&zQ355Q_dacuFpAHMRHuYB%vpX1wo!L0|(;}z;5o8s*_1%Vyn17O-ATE%Noeqev! z7URF?e<&2zf6t%%n|oFlMm#5y7A&?Kl|C{))r1- z3WJ#epz+YZ8(aTgBn22aNWj)4CjkSklfgvNBaF^;7HLyRlV&zCV)JC1!X}1!O6~{l zc}Vh6?HR4RaDt$k5CyId8+aScaT?50X9F~37B)SelZ~69f8CGM0KlVf7FH7$0*rN7 z|MX_&GQqZHgH8soAnggpF(2#<(}jV5C2xW4NYIzWmW92G?<&{{v18LmV5zecfCGSc z;*m}@3Fn8b4erkIFNIRbW-PiH4I@-~7>i)j8GJJk84@|CyoI{)y_B2c{Yr9IjVloYX448Th z3kEh8_vPTpu%Ce0plgI_WT#EXVS_FKyBxSE!2}LP8$J#QmJ#j{Ldgg56wZwg{CvD0 zN5d)c6Ch`a4PmO;W#!N5$js}*#)YFHB!t=K)5w1s&1e3D{89|u zPFwNBb|Q&B^4N+2D1c6e_Xsr6s{Q^G^xhuq0h|E)uRZfWGXbAy4tW@vFuH5Q9i{G& zAZTC?;9^pjR(5|E20;P<8;)v>1{IOBI`S+$uq@7=zY`T23{z8aRB0gjFA>irW5oYZ z6AwYn3owI$Pw@}ZXX(gf5EKU7I08&?w`_2Rma;(Ed-(hDL5?USP*8v(zAW2lsA~zmljMY{3}0=Zykbp$FJR!!K{Qd?KOOsP%45)b~C&^*cbdf4hDdB z9tnHO%P%DEy)zJW;V4f*>^pw^AJ75cEvJp^NDU?sW6YattQ`QwQ#e)eEK>C0r2!J~1k8Ni<$y^FiV>DE%;F3N-{oQZVlBW9 zCVWeQu_JE*1Fiy$THq^CmVAROMwSi!oW^-3fdK$!?_rj7iVa{=np_L+RGg*Boq(D9 z)L>SxFtA6%Oh3ydVFwom3}OrfUJHY~7h%vhlDW^m5kG-@Q25mtcm<>a>{((y4Qgkf2YYN{@Ln*zAQ}l}@fC0m0lf+O_JWCD zaAOKjXxiCUgR21JDHKoPG9fohfS%L^HwI_Ud09rR^1)A=`yt%TD14C6hrGKiI2f1? z!B`59j9N~!D^9b)9aLcmP9xv+n4`!YQ8*{eUlM)@zY{L}(_7q2_By#hWW&)6gEz&` z7fw5qMacCinLJ6SaajrM`NodEoj)M3XR8H`*rJz>z`jsxhbaA&YvpS6L0&r?-jFu5NCMtnBVzqf6(9l-5;MvVJHEX zx~)=Z23M0#o`F4VXr?Gko;|Yhxo!cu1Zv(dI{Lye^Ht4Wm(J`W^?`4BFWZ1{d{>O>oD~M@J5o1?a$mEPM+l4&o z6fvnJSH*}rKKQxOK8z3~Z7&oJAxfC-W89VD`xt=*GsN)|XeKeM@J4V19G?@7!mJ8k zadEHaU&P50T$FG;h3G1-5tT3@ubFN2*yXrtiLf29^DMB5!r5}UE%rrx11}8ndB9mR z&$HmhaAyWzP5b%9pPY9T@lX8g_+?@S87E)KIO88I?2Q*aw2L6f{3Uun{Y*Asju6bJ zslWkt8!SAUIm!Es)nLp!j#&DB1TkNZ3!m&HJ0QPUUeGAybfS;-@3n*LI zxH$vQ?-eXa4)#1FiQ@#{^!a4*X}ah#@_ULeT83qG2k@JKALJsg@GmVK;o`{@PM*mZ zuv5x6Y7`Qq@GWnrUdF+`FgWB30SLx^UmTb;;XHYE7(Y=~_xJ>+`640KgTTIk_W)g1 zWyQaXV3(y|5t@S~Ou134^6egr&{&Be?MN@@~} z5w1eRz8Gm1PeFbGnE6uSZLfHC?o*e5&oh7VX;Ue0KyVX5Fcdh~a#7QjUvd6o{s(jR zg{Bkdme+)OipDQ^d)$u?27_?!`HMWb%e{Qg?SHRu`<>rk$P9RA5Bqm`v>)1Egv%EW zhjDuR&d-PK;SCVyR{(o>_B>;ep;P`YIC^o%^rE-Mya;mBunxKF!tN06S73DsRt4t;g?)|f@W43^{N!%E z)eWP~AG{+@fQ59KSuZ?tq%RCIC5WvQHVpxN1%xJDzXEUw#41%k<<+P2TSsBT=g?Dd zROsBe$H?*T!d~>`&f#i8*zNIK;DWM9h5$YJPqA8gUNO%~>GGite57Dlkc~p_K1Emx z<|DwKd`L!k|3GMbK|wE(C@Q-2wXc1ZC;MW9#i=rcAY>If0F6L$zizQLP_T|K{{B3I zvc}rH3D;XMr{KG@6-8hVmFhGoI4VVZa<}o2eBN&^pz=k)xVT&e_-D650Vf2z;Vs@_ z!BG|*baoA%yhv0Aw^5h@!EX+B^Ae&^Gl2{6Ul1MP_uo^*1D31c8MCX!afLvaEe^Im z7@?kbgYldX9wDKHCP1ZmEY!~+hPGDl;(UO0w)%DaRW5JPpsyVw@LfSdE z71+K*3s{Id&~s*zvnTw;ykSDN2~^LSEGj)=F?SC3fA9x?a1qYg4n#ZA*4Ebe`1sh^ z*v!n##Kgqx?Cc`@TUc0_pP$EnPESwcpWcx_9I9|M9GU;ky{rBTr_Cqi;xBqfrsy3x z2xfG2bQFie6%U1wBu7>q^YW?Z4VD}a=NDr37uAY*oR0f2*x5Kg#OspCbf%O~f*3sF zKN;@QBY7wAi5SY_KY5soB8MpION7(05FRth2>G=6r3#?MVCJmfK$NW zy^gR5$fk#9j5q*V8xxceF;3h#XUN%_Wygu~N48zSRp#V0J^$86=ESn2W%(nVZ?a|z z;|Yl7Uo4QFWC^iWUP7jnK+V#kBTs;P^~^n@;0f(@Tzs8TiFtS?2Pwb93%-E>4Nz#M zrHoydk)UK?@r{hg1J><=E}3v-ZkiJ-Ot_Im;(*|<^Rvyze~Gu?!B*~_3Rjk;N%21( z_7%!ic#3+S6(%&{@XQz>js?PRVH7eyAO6U0p(l^*H`06X`nP*zAVu~ib>(Ji^ssDj z(;z7U_YFmdNmiBS6=2URNZ*9t)l{zc5b?4k2_9=Dj{~4f7;0qZtGf zgkPaN760Tpv%CoJxlSRwcbQ+C-yxm?_=`pW|Cz#RTxhIH6}{|F*g=H#mB0^w8hx=K zGchh~fI%x3t+{B>EYaRU0c$v>5h{^*deCQi6LgkZ0I+B8$1Cix`GF7OIU1*8%>(QS 
z>Di!b%73*`ti@?B2KG6D%l0v;b9r(Y$>N7c!oi*s5&Vkcks7=)u*WU+6xj1vKIR`xugm3n{`u$Y>+7MM+SJt4+S=OQ z-i}B9pZpL1`HuXxw6qkC)Ala^EBug)Kk6NsrSJbB4Gj%-b#>j{-MGq#$Z)HUJ<|5V zpI-!ffmLI-ItP1J@<>+rQ-f#H2rn{9tT0%hj1ZUYG${Q=eMB5IJotEIpwmc((nyKp zixic%0t|CIo{E|IKs*{%PKjhNk;D^fXUvEOYz~yFOL;X!u|s!D7|bE0qZI~LG2aUK z6Bbek( z&yK(cUv#uruuv!uuyQ*dN278iaxH-I0z+A#{aMN_H?UY3k&8;@{|aEw-R*)C!mSp@ zI+M3XgKNDrr;DdA@9vI7Eu@%j0ToC zgY7^_uYAHQkjHRwbqO>CdG;Kic`hO6>zy?zC(HRskR?x( zT-;v@TTH>IDr`VPJ_FBcV1p1Hr^9!Jf?J2F^E(`;0=Lg@x8J>cx2C41wzd`k z59mTc3;wgRva-6m8nA&EVG6wCe>igC?;St*59omJmUAvEE5j88*yBR;U^%z>c-}6* z^mzQtXVA}{8sQYco&kxlnyGg%FKc8&VBlDVULmoj5QgGA9w*p2{By9+p_nus1K#J7 z7nW+CvCAg~(T-Ob7Y+16dtz2v$&W^{ewL&wL>k|k55u@ia+aP9peOq*+fj+K6dw68 zx%bPr8orh<4jv3I1Z(&co)^V^P)_A?*ru(Irbi%n3yYL*-$Fe%6?oBpA@o~-t*m2nnB>3v^ z2&uy%Pom0u=6TF!rW;KS#uLYkE<>LCBCyG)Vc}N^wNL!43ea9ykaSx;=>kHt%<;95 zOvi1>Jm?Fj@l>iEU+scH$IUu!IuyeH#J=D?jK|}lhP-jWcLL5r@DMVi`US$OU_j70 zTr@Z6O-!$geF8t9bASTa~z=Y@nqfou~tDcW=} z=u?FtJzqRRVosL&>hiUO@nmY*;AwPgN4zjeL zJhI~miGBjN!B!?S1mEWGqT$TfuwXQ>AOeof^9|`S@Anghz`g%(+69) zU|?Xt>-GBmemox<8p40Vyi>1#ujli3^?tY8jSDw9 zIf<)`$UU#VV+JS_&&h-R@BZ%Zg0^`p*b4zNhA2F)ny;3*uuXG@I=?D~Hx$Zu_-D!a zfinq+B~Cid2PZ7~65}iS!gx}a!=s1=AbDb25W`52PvD(TcX=w0>o1)7mBO(83Y z%;YEg@CEECtY{%chag&T#|tZy$jCg7a@42IjP0^)WkcWs90zTM1rxtO=F=WZ8Ch`Q z*`7e1Rs?H|@*7aI2ZKY1P!PpP+g(1nkM_nql|p+LRc@hJJ9<$MgJWv}?DH`fffM9N z&qslqDa@X*sgj0&AxJ7H~I4yuWw@BU=>}X9?^u7pT`O<`fs}t|X6w#-+Yzxm=;h_TV4I^4a#VV|_j)6VDw4DFopvWJY$s(k>u;nM1 zA_V_=qmou*-bcxMD*3yXn@VJI(`n~TT^_-rEsbj2L8GzBD%j3s6z4-_1@ejqFE3&; z%i$1gZPv+;uDHOa(PqbJISQ>K=s>9${0idd@NJG?9e$af1p4y#ec_+DfylT5p4H6q z@o zdyZFC;17I$f#>7*z^6a`Sp;ngzCghtD*#NMq8CDz`M{m90@*6a-^R=xg!RnJQw#pz z6SIKhD_e=&hAynu0x!)M7q>+i4DxQk(Jn`3`#JB|qo$~}w9L4-OUt}rR zhK3%U;bbC0sUFyvFBoAwSvY@h2;r44eBt{Q)%kXweY)sMDQJ9(+2p~VUCSV3(Ug^-DJ=4?N}kn%i8c!6SCA*$ntPzvy# z=j2lXCpw!DkTBCv;9dl(pVMW8_f$KZPa3Cq3>oq)AvPY1;0Xg8HrNe~e|>5x=AC)L zydr4f2Vv|VN!*|eCx>>Js9Gl>E|PGeqJI=cP%TZq)_~SL%z}6_0;GrSDHL(Bg9+j} zkj!JML3EaQPdXk+N5V+SVTG#P%`N;rg+uuT%vtPw;EJcKu;0&F10GuA+sXy5Ldd0H zr~0HIM2NlRd6+_|i6x{6aTXqSdXiwsHcn<$&{imHlux2LJnAIacHF<=DsS9^rWgdz z%%Iy^aG?aJglBLHaVw^ez#Up@rR7p$;yS&*COJ@J5Ge(+Q`%973RT2HR_ z6U@F4V&}Y9frAh#Xz~Z-=Rk2bYQmK-QO=vx1svsTjkACSx3~cI{DL!vVj|-#VV-)s; z!k)?Opn_X0@ZnSxLFJHChQJ5M_=`v{gi{&Y`qaqR9t+mbMX=}md?74bpb!KaIKOK> z;T7`OUwDNZ1gvlaP({+hyn7IV4pRc@Z3p$%K;0 zvs4_W13c$QBpk$jfFDdfr@?H%oz9a11(J<1W)!31UZ!B~F|(K7TS6jwJ`fJ1L05)I zER4hQkX}KB1Li+=3hs-Oy~=J*&O2e=qlsV?wOWbuXa>w6Qf%QYkYp??ctt#oTO3rX z&WmR_(F7Gg7Lg)}9Q*^w0)f2~@RC-#Pzg_wZu~~hl(T$<{OWz;e{h>z2>%GFtt@RW zpD4lNhZIp`_j@6un2aV&B>Z5O>Pg9d4DTWPzzZ6AsY0QOfo^KHd4T}n;zb1qoM|W# z)q!7!X~e)Ur7eI^92Of(?gd!@=|2?rQgR6;7VvAtQwpeznM`ql_`&inJrr_RDXy6ZbN;qMJ}gU%qjXvxZDqa`&RPIM?&Ibw0OOEEEBq&Stnmis z2#~$bWYuU_XGsPZ^8kd&c7IBn2OUPry_BJll2O9@Gom=Y_&@A|Ah8f?mNlUJMPUET*C|fVNo1Do!qxO3<&=ibPx6F7 z8eK@z=V0S0p%i{Fw(Ja9Ze;c0E{&1uI}gv$y((NnDmW3!MUXWL0tebkaaaryy#Ms{ z7+(imiQ;|(XAZz(n>!x)THp~fA+L@6N5<%q+eaV{(>{t$JuGR2nF2z(2fztv!PjFU z6OzYFaGtbBFhY#cL1YbJ6M$uaMgRc&5ZVExBN)L{6u+Nq<#OdX;5{oB5wIukg9;E% z3qGGr&Vp4W*+%;JRNzWD%N84pLFbKqZrAeyAdW-q#u73<_{q~bLt8$cfG{u#%M=s> zSTkviLhea88X|Or=m+}5Ky`%G%R>C2Acn)$4!D`k`3({PNh2+otZ|+ZTDa*+JQ2uB z)(mEna%XTAkU~uI#P|?|vg7D8wV7oauFlYV(Z5XWzh=&-#o3eX;lqYFWZzlfue zg~ybIShXFc+pxwObOj6dLxFbVPx(8xbl~ zusGm~@I8ua&Isz=yxN-ydGT!xn~C85GPxM8C`x_Y^DG@VJIF_7*vzZLDgK(z$mJF` zD`O$69br?7Gcz<}{waNeO$Y0O!nj7^m^^^RToOLGtH5G)1zL|j$*Dfl3?R>i-ouS< z7Qf@70`x-|A4zUlL9~P5mIWDs_q6XN;S7cFh<9>wj*omzFBSXbbqWaSsSqGv>BFsm zZW&-_;H))wWCiw)B_Z)ZIFH#eNdQ8Md=|4n+VhBbVQ@dY@Qa^tlRT?{ZIalJuv2kH 
z@FHWu1F4+7$j_Wmg~YTCFe5atg8z`$ZMz8YoTKGo7rHL&Jv^%pk7Q?DNG<2Pt8g^t zrIU?_Z;25HOmHAv-+W>L@iPRwd^h{SfxU2O?%99!t6yb?3q`wmIRo$*=$=TNqM;f_ zB+kSofjfdp2!$pt`2;5&5{a}7gjo=3A*c`)eRA2I1Ya#2NunhVk@>(3+$dN`CqZa7 zlL3h%P<1}RB|Y-0IhKD7oMiD8EGX`VlG_OUp{on8Gl)*SnB2UODh@@SH;b}!=z3zZ z38i;jVS^soN@nG!IC)?_N@fu&Vv7MV0|?~$4}wx2q#;Pemr7)~)yBb?Tlz4fx!KMZ zFzjS_rZ7Yx*+vEsC#(7W&Foeo^S=;~=IEQxL1p>#`5HWyu7drB@dwOS;E8f{_cQ;v zU^s^(r|{>2nwV;IG@waf+O!u$iZ}zwLJn*Va6}I$u*hM~lkx^FGl4-7YywVZQg#L9 zC=j1Pr-#iA%LG4-WxBG$9&(%LIaBc9SvGvtaf{*Nf3kgt_D~^@fkg_q#m(;rZb26E zR`^6Q>y26cj9ugm4IaPMvlsZ0xBlNd3@<-A+~4&K@XPzuS$r$l=jj|uVr85Y^Hr$$ zEtvOM1ia@hA+Q(mG#8Fqv^3Z~FSwx$cDaRJ2qv<29YR=v^9_`}n=j?RSklJv82`ka z4Sa-c5#Ugn8_kiD5K$;&r=$U_5%@yLL&Sc%wXQ*um+M#qekg#%{vt6H-ECduw6j)9{5@3Cm0rLW}~5%O;&CIrF0l z`F`v?4OF+Uj+Lr&-~d}2?X;= zVRYvKmD51?Gjt@!08|sh12d3Xl8_4h6QOmergO1quBOiXV6fk4lX?_8M~@s(;-~S@ zS?ml^8L}&hO` zOqd#?(1;mOuNt)H;ZheEOum5hDFw=!854k$5CRYJ7$G;ays*qTXT{8PKpBidfddz$ z{cwZh8xtV42?%*MJ|!cd1s_*(=6z`BJqRDst;22vF8RjF?VjX<3gHsm@hr5(qtt@p z71hfjlm+etJ5OE$kIx%7%<~!hIOlLGe$Gccj-CrhoQ&gKUJB|O@B_VOYA6$_FnNbs&{AWj{Eo6TbQ&L zQMLK_5lE7DA#DXsLWWk?GhxA*F@*BZFb?xndQ=!2l=%AW@^A zP6U&&vqbbXnS>fWg|``zh80a~mTc=ha577mMZvf@2O9;g4z9_A|K!p=Y*(g;D$h@% zWy?dzj6>xOO$uyffjQ$YAFWhu04%i3)3EZn0DWe(%q@GMiP$@+)kfG66X=H4&;pE=3{cV;P2?x=YY*i z`unJq|n2jPNHRwL8E;`WRUGJ1%SNt`gz za@JPvB}4(jCYM&X>Dz+D4!RVG1&x zDY6Fe40ZsRLkc+3j>$;N6x@08#fhM2hBFiJBIkvEbBvN5Fac}&Q;3&wp0AJ*#WV1E zNfWu&jDlu1I+wxCYy4f%3yLK{{hm^9RD^n&ug_IMaZH|}#5h!nV^b+M(wp*)Jh-#Y zbTw$%(-we4uV|c&Xf7Iq#4u@q5qrq=z1g6Af5;kP7LgwekAs{xa)Jtv`9e8f7{rfH z4toAQ^jUrc{s61sM>>ZU>tCI1MYL=2XdNDxZdZtS1<6QK@?@BV_9 zCe#S#vjM43H-~(|ImzWd|HUu7(=BfEU|(EZ{PkzP#%G0PdXW`L=^I(%e%x8loTZM(p?q>k=ruGm=8%Lh8tr${a*uJLjRjPz?PF_o4~31l~_kA9|c2 z|7twPiSbv^c@_u3BRt>Z6gsF1ed13NDRexcpkN=V^MIA3pIswut> z&`!pJ@h~#>G2u|Xj^v`j^f}pB=^zLd45X^E_!dhHWa%@s{Rd_42BtIvvyty>i-Mw zgysZ2vXIJ%PQ}dGfm$i}2i{c={s=Y5V!b`c9K>g-y$5W@1dJ+t1+F`qO&y+v4^D$e zXQ8ubl%hWf7bP<|AT!`#S|2{ zA$6l+X}P?V9-d|-k~$A2PciTWkVa67Hg$7@3eGdk$-QAY zr+9)88=mmylw9(b=~!cZ0N{iC^Z=Gv`!TlgvHVHvp2_ZF?c;#>I2Ljw9gb#0 zuAnSJHaZi=;c;kfo0CJ_u{N$06F~B?u+y^h2psaJm>Khh>PGW4c?=Oi|levo@jss`REAP zM#JzHD1O30nvn=k1`H9wiD%XWSD7iud~r*K2w@gxz9{s`&+UoIsvz}^vM!i-RHDZB zQg}7sDR2Zf8wC8sU$TG{4*=oW_Yd}h!X2AYc3fsnU{Wq$DSGCaXYy&xLibgsb(cGe zC3d5!)nI()II$!X{?6kR_F zFCK-qqq(hMZYP}Ej^x%undNX2=d&8mtVYx8k@RL1Cl+0fhSuWoxnOAeJhBwWzovI_ z5S$+MZl|CKB^&YzSJu>s8ey?DgY|~H1$GbmxC{N=p>rx!m5 zFM`fU%x$Nxj;!4~1v{`qfIy0eP$WrRvRK#;(9X&Sgzk!kZcAh&u|ciqg@_4(F<#$+ zn}f{{t(RlbU^H?TJUNhC8 zFNuN)vI6J^hAaQYFMjb4|L_l=;&t~ozMOr8sX4h zM|vJqOA9v|z9!0T4x=Y0so;4gbe={zQ0$EEPt+Q*`P`q(9M?W zeX>!6I1kT{G4{?+4x>T5ev(PjiIKtyJ6Z-&I{YUVZ1xyC1D#<8B~ob0$7Rll)P%v! z7Cu_vViP!xN62tNyp{D>!y5{XZCFn~`k6=UP&glQSMYIkab>@kemm<^N8~FE@faYA zqRph|N`C~vqzJEHtH?lSR~B3uK3i-J?MmvYgWf1eLre*Ozzf+Q3? 
znF3GwJ*~g@2y#(EZRQiO7Zmiq_6$1#9#zyxZ?RPbfm(0WW922ja{jXaFU(h%(?x>yjtO3*f*4!C9Z=h7} zD4AJ3iMZ!BJ558csXZ^NT-Qa~+X~|?mH8Eu?UNDkueQFRcigl4-gFM(OJC6s zzF`@8)!=>I?CLhL8U%5nnaXzLU*uXXzjyQ4T?(q|;zf@LlW4EdB{E(ssa-C2{wkq;fln z94s4HIeVGxewJ_=pV~|0_L5ngBp!DY8N32^2b2unvy;tkrc$f1*h)OMnoe%xZ1F=9 zuq5KxO}3M%^=xh_k=uyFyr7ToC6hbppC?-TWuV?WOhH50umpmDfoDd9_M)e zWLrpK-QiGJ0&J;fX$Q#W)^P;3xbTSef-?s*iX(5vGn*-HWM@tYtutrXOcGhV0Vj`r z1`7)0=pop(MEW3|#p&T78@cQzj*K;qk;dxFZRTj;ySN>|4Q}iSfH#(OMt_AOG-_Z^ zDO>=1#Ca3g;$Yd6ZNzMP@=wy)qh#tZ!OWr*PKnNnyK1n9U|4WJ1V#pLO$%c0>^y|# z)F_h%HW8*WJO&t}>i~H;v{S)$!JH#>`=r$@_i(ty1~UkK;^AqW1S5w>XGe$UC#T_~ za17QM`7Kn4#CQc}860Oam~*nsV26Qyr0(*tkf`n&@1zER39yggTe4suaiQ^x5TWMH zj)RHZS&T*~DP)_WogJM9-j)(30S64T!Y)_|VV2fL18i|LgX%B1eJs99=}p{PCC{F~ zzUBJ?`?vS(@ip-5zxuUj*w+x!bFjxO940fS5UyK}jfV9GwX)$5tQEqB!SsGB9aYf5vw!qO(Ow1`aYQahfsODs5-uDMz4F?A)peEWzH7)Zw~XyzCmBCwU8{+& zqugCq=SDXZYkoi#DquKLU@6A-PwXo@&b7mVwbQ|s6ZhhwWAT9gbL-T-apYXtcdqW* zmv(K7JI<8@&*q71?ZiC4YhBoJuO3XFM6km7Y9xOmm0LVG^)79?R<>R1`?jSmOpa%9 z&$qniUETF;?722}9UJ?e&BMWsWAEC5XKBN;zUA54x32DZcTPqQPOwQ~?XjhT4J$W& zobc}i%?n$DTL<3leedSM!1|tRW!JmD=ifMTFK;;()(2L1?DK23xmD-lhI@6#xx8gr zSaYxMj-G^OVrif|b~?}oFjhtmj$PX)=9L5I>ahnuaCOJMyl!1yHLtAU1aM-uwQa}7 zF3!ijx#L*dbgXT-H?{`0c3kW0{+->$Pzd`bcU*W~CL_F_2769c3Hwn#JMZrSlKto> zd^a<|(mVk6l(_N4v*$XGEYq7}M!>>!cJVAUesD6lx$9Wkb}sKZmkup+`{vm_>pX7t z5Ehza^}xQe?^-{imAG||R|Yo@J!`v;)!o2xaPmB~l}#NZgqDpVI=LH(&h4N3Ru5du z2fpnyECei>fvqE~wSo16!L?odlYi~dyK?AS+_lZ`*zk>OhgjhF*TD9{(s>jM8A}fX zsJxZV1-6eIOPiLtbu6gCjs2mWW4wYVgIh=LjRX7ofn)Q?xxMe+*~41%Ebk63@41(- zC+zulj>eA9S7Kx(P(U*a#~?d;a6YiP?^@gOZ0z{f_6C+W-HYq4)lJ9dwrzXIzO!rH z+IFsQx!1Sx193pd)}Cu;k6zi>!Yr*v6957r9IF}N>bcCj=j1$)jPjt-ke3>`0b;3B zJLmaeqG|3OVf(4F5D?}DROCY$(YMuoaP@o^!EL|LL ziK6naqr(zo0e%q)VKB!|BAJsI0?LFzJIMqLEXoW6^s;Y+S%cHymJLT19G{=L5V=n0 z&KT!`fHG%m@OY95Ltlk&0dhZ}kfRb8gKetWar-XyowON>o2TfNxUAa*uL>`f9|i`)$4zOJ%dQ(Bw* z&2^o|>JEKfm*zFI)3&?{-*GLToHz(ee6x3Cj{0tWO|Q0DVQrL|@q|uQY-#GZG>FYO z&)RMc-T(ulvENkHrE3)1>-)_xP10Nxvi;SWSsaOgpnqh-85xdi>a?ep`#k zTHmRw>(Jt{_OS+!O?_s(*e0=GmpflJ`W53#6UWib6nPRb$!88i3g6^ynen>Za7*jF zCbzWo=-a!st)0r&9(7Z%y1q|^0VAxnSBF<_iFMb-npTOvNvyxCb3SzVvG*+m!*J6# z5;_0QdB1=DC4;Z}v62~H8XTdaTiMv9xF*)$lAGIlHH{r|JT^a;W3=~aZ_CWr`gKj+ z$`@4jUjK}1`*bFp-bv=R5obAzsK)1CwGOpPEiHX!oIp#rqOn_C+byc;5y4ub6BDVL zBxFT3i&ae`Wka770~Xaax#~5uRW~y?8;Zg^*@>qSUpPaKW4;OIMbG{s*#9UG|9jdX z-u-W;cs)FpCt%MT;RsT)MDv2j$3BbTon>bE4fEhFrL~=wo~=#fsP8n^JT%ljHr-bF zu*9%I)phBxZ?uRkZBj?8*xJyc#JvqC=YeNTHN7%%9))MTpGkw?f&(KNoVqEtHg@V- z`r$~(8+%kOVqLRH+uWmS>QuCKDz9~`ul4BQv@~>RuSx8;RG#ZH+clB#RjorgGC#6= zvYyCb%mkxA^amF2Evc!cTZ1>;l$dXbO<1taT}u2X{Hx}EBlg((K1GX6b4{$p(rfF& zVpO*DszH&;#utY6&ljVa^<-`}mYX_F%EuO;*V*fP=Tfe%wTh`bqsmJOQNo&Qj zdWEV^BCqQa;}tkpZ4ynBOj9pb-q7f7YqY)o01WQca12xdiFP2uo_Fqf?J+NLf*XT8 z;r&E(_56JH;ACe1Y-;a(Z0CG@KNQ$H3GAN0;21qR3mhDIclW)!2SW!({)2rQ`-k3x zL%cGuy*qYvyq1XWrIUx*BqAn&((QP3dVgS88-CH){xN)pk;DCwql1y-z0sqC zv4j2b!=tgigVEhX_OFM5g9CQt-N4cQTqFdJ695kq6b1*ZDC{AFn>zy=n}MypvF(HD zgVTw_v%#Iiz~LEAa_lfTdU%SH#PIJO_;(L++CZ6+owJF((0U}hoydVP-HN4F!;#%w zdN~%JK0co~4AOh{&qntT0{hz|hr7c^yMaBN$q|0?@Zq6vZ_~fO>EGM&@9p8RL%Rn! 
zRhXN&b0`&^Q5Ogq6e9PyX`fQi6O4fGdWiz;pUTE(2@&`j=Gi~<^{?g&tVv!N#u|dJ zFnQdYd%eRmttxvpZg=HCt8TbaIZz|A*YueiM7Ac0t3l+b>vh!iJDZh*4RU+E$W$*f z0C#I8=4!yL+J7|IoyWk5!RdTx_1#tvRCbwarQTYFx6R)K`d{^8MikZ(C$xvV&__?mgFRqn2p zSQ?eiCZ(fZW^I((nq+nu4X{g^rS`ghV-1K0pnkuls?S^{vQ_k1O1q5}y?R&!Z@MRa z$En$LZtyJnMquoQYoJuAFIJf=^sZXnK!wa!t8&z;>{S|jg~ozWrLop)>`huno6*y% zA85A>)oDD%J^JD@R&^0%nQT2&);{Vt3axvxn&Slj2U6}jtWgT;>xWj&@A zh5LbT-nAQ=Pv#cm>50<_ypT8SBQUVadbKq&dyT|~Ys8ombEDXbX{nW2FzV$rEiE!f zi^N*nXTT(s_v)Kf&KGT7>-z3mF1H4|A)HoBZ@yq2!L=yuG_oGDNa1XR9 zUC+KLgFOdh3#?C_NL?q>)F}=1Dr>#c-k`SBtIQ2bVbm)PH8O3LL|H9X-ZncVGYc~b zq9}1s@)#kk3nm@(rAbtqYNNPk-`t8{!ewPCmJe1L zbb2l~xgQmIXKt!J*q|!AEOlZ}gVa~m>nZDSRra{C?i!?n^-@o**j3%{z>`Lqx3g^k%N}W9NpyJ^U7Kgi>D+2IHxWu3m-b%JdmDO9)g8u$ zUfVVKAVyQKwYJOHB6hT66IOb#uhz;;O-fs^Ksjcn*|YK=EErgpWlO`&U7YVW%TEvsv@;owF(v71Tm z<+3wp=dz)ZS9Rt)3Oxd~x7FsGYSS&P{*K;wN3Vx1a#N*76!m$P@vhSJyvqE%+JcDc zZN2%9!SuSzrC(YaIXqoXWDrtWN#sV)BHE?Zm#y|YS_8teuWGHY8yxsZn@oFGXTPJd zATTX_g@(T9n%r<*VZI@!^MB1SU|Zas3#DN~fr=pXiA9YoD-Z0x=hgPRO2n+~ch$x_ zTHP&;`X+`_e^YI^qqE*nYun_qHl?&pDR0v#uhYN6Wwpvi#z7>*7Qwu+3~`Ft3;R_z z_r=eD30)|Wtx!nZ`<~Y1w*`BIkS`To{_5BMjDbD1M`K_ghlRD0PK!pSUNjHXNGv6N z<|{pxYQX1$Z%L|quN<3We_E?H7FeQ3QL7pQ`WDkl-lZ) zz6SL$EQ9i1V`aDOMWbab7+TNe{CmL{H3PRb{(7maN^GxFIO_rSFgTSM)=HVBLS-#iSu0fLa;2$KY%J^3mGtXNM5YR*t4!f6S2(K_4lJiuna;Jb zjo2OtB82C$@6dKbJ$RAX!FoBP7y%h8u%edNUw8W24W4SHyIkR|l#kZP#;W>9%6o16n*KV}k8w|J(k5>`9NoNt-ho3?FI{SMn zW`7avzo!-Qqc^|nX5b0Z^F}hH7EQMVmfNR4^LtGU>?ui!#eI+h1{xNAo`3!HP3!12 zoxfT(P$?OxSB*5Pr>dnR<^BFjaiF4a5DN{iNVUvc(dViVd#c6WtDV-8PIFbiv$WgZ zrXPLDIV+vsSo~9!0tWi#N4itf`uQSRq5OLSTzv zaaDAhD|)PT5-dG;o!nicv{y*YC0&M+4kH#F`9dNGto%Fnkw-(bfwMFW_lZ!hdwA)t zZKT!YtAxX*aaYS7<^6_=etnhP1iz(1V=7l0tF;y~wLw@aZOs}7mSKrZ`^_FvnOt$# z&NlJDHJPuj%Lc2Z zvQrN}xK-(FR@rMM#&WQ~Dtn{CS|!GFU1^W1q)T>PYmrVb1WwMDGwG#xVm_7_*gkl} zI@r{&tLjkJi_JA+Q?*E6Bh}Y~UX^PRQLXA#z-IuF+#)eIi%l@CV9fwK>J<8BjjnfU z+Oc~um&mSWaubo1d3zt*NV~z(pwMBX2A1DYS`hrL=$17|b*)NMlT2UJuc{KMz}Mp3 zDn*9U9u3?`SW%#gdjkvJo%88nay6Aha{6#6+CRN`L*=aPHNpzSuY(Cy)hDYF$!o=m zTJmHJ#QRD#<$aP0v7|;RuU5z^B+_z`xLKioFz7=RW+RQXC(6-AU>DF(pz#Y}Unt4@ zp4KJ@dqHQPA&}tNUj%zDD};g&fIR{aNZ+{c@-<4}W7%sozAIvTnaq8|KHaSGVWTJ4 zS?UBU-XL~2$N)>0tDWjbg&j!WZuHf`=QH5e_tZ$u@EQZhVH{05Hjm&sVUKF4MCLD% zdz-D}6-pa?VQ*=m;x*h`1*BH3+@v5ulN<$t>aDl;Tq*&t$LtQ zI|$1FokMhMTX&02Q&h)ud&mRnc?G>^k@m} zRqisCr&jN4vBDfNmv!`m6hgKOi3RcUa<249O|U7K6CC4dJchMloLRW7v$dMM;ZG)o!{jdS8uUsLBwi)c7i8g8*012B2g>KUAq*l`5D5t~%8~qiT@sEwQUY z?yc7P%jAw~nXy%=_Xoq%sjM#;zhfN+i&Li^sn(3tC<0|-ccb19LjhYD223Sba+Up7 zjEY`c4SmZ%oy=D+_hZz_hVY+FN~dM}d^wq$i{@^tY>3xj+VOSxE%?_4g{whD+X~rF z3Tv6%Qmb)Pt8A4D3kI+s&-tiPnOgL=cCAUZxQy);)&r)ORUZo)_yGGq`lCO3%Ax;} zPyapb4)4-8zXj}>bk$Q}PewK?Z9=9uGH`L*AV1VFy^i!9n7rk3PpQOHrG)iAQ6(EG z7Y|iP{N0}S!b_3aOQe=kvAMX%j9min ztXBnUl>S<{F*0k5+Tq#{W3l+pGPm>tw9vFJ>{L~nff|(uCJ4yXdY!A*;I7bGD|NP7 zy#w2CliCjGg&R{LH`ke56*_aZ-rT4)Ueg-Piz`&g48>pO5%QLn_~T%Y%zuQy*TNb9 z)_JdI77@w%ZiIS$O|tP8#bj-Npt5_QveRDE?`i<;q3~c+Ef?Fcy)-DjrG0kHFiyKn zWCc>Z>KxH8Z^P(8U6p6&R64eVcnA1nd={HVmDE!ubK@s8Xx$if3VW5vR4cZ@Z@H!$ zX;lw3$jL2%15?sxtd?nC^9`w%*2W?!xI+WSLGkAX*ixF+R?yFFN;@7adNq|I z4PI$dSzt-j$W2We7mg2Fw^VF}X9&}(qQ`hw??e6>LL>lt|AjE&#?P`;Lwb>B{{s` z$YED?^9RhQq3Ev62-jMR&^mQxW=Pl2S_2M;a=-!CTB~rawZc66Jsy#fS((jlR`)ey zSELcvR#rgDfGEqJre*g3 z#9&Vo0X6$y{WStTZ8QTNj>r3#hjWMfkQFauJ(cZtko#U_tj66}>+QW0=;LWCdSxko zWh!g3SF||6``gyo9cMi6wYj+2PS{x5;VXeCCN*?+7c^NPdn1{>6G~j>!6_=}M4CXL36n`EBMhy@Sk{aIewU$-`P>aaUX14erR@U~i4HHNk9>*P`K3WfR zW}sGYt8sST(0e}B2a7GqI`{A$&se>+7dR|#g(d)WK#RW#6*c)F?Qgzt`c{pdK+A6p41vkSCtK< z4}z(M1}_OZL`ZRaClFp{?ctunOprzslV*;ON3v4UAVMl?59YV?uhJz(0egwtR5HN< 
zi6YNHt2@8RQQ97;cP1YMM{fHD*zc;WDS{FfaD%B~ot-lfIW2-B3GaMnBm+1Wzfar#_-wqN~mtd$`K%Lm^S#qHpUR;HHbhncjWz zF4XKMrSm`?{vu7HdnQBWnh*EA07q*Fyoz=1-@Gvw83T>(%%jeUCtWkQJi}$??h>HX9Au{ov-Gmr_}1wi zdx%BE+W)l8&E`Ou4hS_kqIdksdOwLlh@;vWXs~xe_2<6O|BtU*3frCcBIyU+{SBd5 zal5(TMYDZqoZOETg+x%m=-LoZ%q~gm5ysQ1M@776?W`#=*=USbw#O@V>GJkWX=_hK zTc*4S5Qt?S>o$Xu^%Qu97x;{u>kNCG_J{Xie_i&aX8)IeQC(gA zPY?EF_DZ1lpVcr^Wf;sz)~lnnvr~xo7hT;|ZFYH-?cF@YkGrQmTUpQgY1h^nIc54t zLebFrsekpi^SlkMEg26wYngr?)wyv()*jwI3v8XZR`=bU=U&#h zw13;50$nPN{wh>3CU@}d*3d;Zy0>3xFjQD2xv%p@8+{4O{9f`f8`-$*Ts;ZA-S@5R z@`mx9Giv2t z>RgFBTNhN=6JL}whp%%@&KJEl6m__&+|f#RywKcP<4I9@x%u3z%6meImzq2vYmwer z+~zE4b(O&@+I#ZaIw5STUGclV=xv)bb9^*>aT(t}Al`Zu9)1{^YVePcf>6GJbb){4 zs73{;cNa97;kej$N;^Ec%{GFkN=KZvQEH7>x=3+4kujON&{F5)nh9Hs3J!tDl=^NzvhotPrMz+z$Z zof{l+a{@?Bj{HR_dV6V(r-Vv67J$?2v{Y(u(ApwOf zMQBY!ogqDN&*Hx24%fOO1??`@)E!?hxn|xQd+uvXQL{smmR1`NIntamz3b*nBcUyu z48%M2MDkIn-AB%@X+no=);cy**=Axt$>kkP(JEiRqRImQ{iW`2U$>LSP>w8awidpz zQc28x<$#j|KdRm7Pn$e>Extxa^0u>^{5u6jcbWAsZZ;VG_grZbm{J|3gzg%X@MO6! zt+}sEh0Ugd=8lqfV~NgG?FbcixIcYqqgqEL0+53Ng|MtJ`07pWuGL*K`^d)rEsK|M zx!CN>M?_`rYOuv>O`+WH^f$h1XTL&jOew8QZ)5i;d8tRMTW|2)bw%@9EQLlFJb#13 zf8XYFkIc~BhxHF4M?rd38R7a%wO(96Xl2aOdgGZ|t!sCews`Vhdvcq?b@q{4uJLL^ zUyUx)XiVQWr|S)|%8pP;tB31Q-Vr5us&@`m+55|Fy+kSykm!LPSOdc+S*~q(VT(Sm z+va$ME>WUO6?X9L1^@zfm?}rI+C@P&4yM+c;&tZkyRLz}SApD@!8*raoon=7c(l?M zDA${j0z}t$*&##A8+RPuTb@9Pg|bDU!5!m9SLyvQj%+(*ag7exO=n@NHMiNC2faaF z?oN{kRoHuK-5DV=o9y?D-tg=SyAaB|&qLjiDBPWDLvNM7zoH`z3tOQl8jEu9z`$YD z+mbc5Zi1McChMnf3^&_M_5N66Fmc--t+k>SZ+DJQk{L>C0ImNbcvQ3BvX_7Q@BXZ| zzV1EP|6?`ze;FrkoGp~{ZFXEY| zp26>8J+IOuuX`t6_s_fITHnA|19M;Zj=vllYaN|#8JueB8}AsIHBK(+#%Ei`roZVK{wh8B+kq9x4C%5x4%axXkhm3ZG#uetHz!xGDS-5@oxs^UyUri?45a@8f)pFe%U?vefI!w zs~ekd9+`VJH2Z2~@zv;ZjUz(%cFWmSYj7e0_sy^LpPWW_woBSuN{q%*o1@YhfMI@~ zp3;x5yy+fqN{uz8$D8{n+J|QhBXj!x>DHc!w!T@ua?{8%QCo>MSOq5rq`hvVON~Ag zG?ro;rz~LUs#KZ1IPB8g3D^@R8`HftdQ+*J-a4b}>(1`X-qF~2A6wOpEq9DAw2m#bPA|62EVWE6wekDNvVLl}ed^#=U@*T!O6GMo zZ=KGN+TY_gb?@w#Hk*svJQVCf`rCoA=J9Rg^rn4o%{1}0V|30iv231OH_fgYX4YHA zm)?vm>*jZ!kFQY9$khjnY_VcXsLJN?u5Pg7jGSCH>g?~qo>ZoJc+D}t$6Fa^-s-28 z+Q*iyvzyNOE!W(paeUc2y|i|zx2oEQIK3_ zJ=3!_?FIm~!00cw$DgKWpO3CKO{~70n14Mn-9A0vF*V;lGRqn?Ol|0AwmRnabxTKI z_b=q@{5PANwbsaOXZ(pbp58r%C-BU!l)f=RC<=yIdLD+xn}#=A$2OaWmfFS_^)riY zlT-TXY2*At)5Kis6z8_7n>zTWX9aQs8o$PoU}tDBIbyWTk=bto_6T6Tl*1_G-3#=x zA=qZN+lLm|9%boGF0_u%S?1TQlgqlkDc96GX^ee#i)1&K3bfhVU<;t|_m0eN;(K#K zf6>o?{dV?jEqgh6erXt>Dye!TVQot(tpbob_iep>=tzj$&V@B{Ih4p{`dE>^yV}}U?-}A{H(R_nTb)H{ zx)p0e=uq6`LA+el1gjt86N?Q=kfO$pe#?H>?24`Ju%~(E7TIrWY_1}cv(g#9Hf2~ap9treV#pE`2D1`KMcW!w&43SOI{w&=4IFvyi@+?05Br#QJid1_t$i#A9n7#@nd$$jO zXUFIq^(_cwscc!n$oQSE{#sYK(G`5?@A@1+oM_+Y!PIA=?uXvad%n)wp04|yy^rHl z)T(Q}BkaNpyhdp_H`V609fmYcbTc8WS5k*i%IJc8l7Yi%rH#kEhb@FElzH_`4r@ z6OSCxr=Hjof8t3f@wBs>bNkGfc6lF9;c+r~GIk_5pkDt|il7uQ!*OY(*9q(!cxROmOGIF~8oqz8l`y>0ICJ+T0GU zZ6x-NlE-J^okQQoUSQ|gyLDX9V&RD--=$>mr7tpce9DEz%%`$NFR~PGxYFHuE0i)p zMQomTZk;1Pgihz52X~J`2PgiWWB=N|dwD;&o%L;IA4kUXI)p{9HM?utI?_Aa0`{A` zg)iIlURiSAc&T6B3y#{BPQ#n$k`XV`j`vUP?&1-%2cds3pKDv0t{g;^rN4p=v{+|vv|F?Xn zp8VvM2l5Q#zh!&yHV*Bc(51RvJtVE-xJ)FZ##Adl#Oy zgU+qv=>BQq;5@u_5Zu@fZtirkWOilgMGmvy_s$`Ix##T%iBW`o=?(+-vsV|MsmaRb zX4XF1qcTVAw?oVJ_4B~~m2G3!vAq-AKMEh5@Q?>LS#hULAi7=Zt?0rUjn??u276j+d-smTK|Qp}9%R#NbjI9ENAc}!bnA?T9Njw) z@11n*pLQQ!^z5C-*N$Q<$I-PD|Iz`?P_!B~x{%!3>za)I;aO=CJC$Z)NnDiEo0i$b z;nO3xn!Om<-)bAk)S3)Wqlvp+$x>UeK9GDI9lY=FdE)DN=IMT950kZj=8OC`mi{Kz zd)FSNmPZ+aupmztLTp5(91icU-pKY2*oWsAYnlxWTvbD7fv&UG(et2lm-bb z+zCV;24lAz0qUx<54*ab1o}UVOgxKC{^o__H!rRAj?M;)>z>J#c)QIG>7JS^Z)&SH 
z+o@TQm^HZLpGSs15A{EBb>FiFYxNE`K;H11)WGM_-aBsE#Nzh?{r5w|WUXZ;5jewW z($yT8oCg-^vU}k6pz*4+ge&2d+hXW0@3@jM1ou3l`@R4y{e5rOLr?UcGt5s;^0RRN zcRf@2O}4x@7K#N#QKUe=$vJT|!%-A{m&)u3Oac2p`FDR-*C2iN?|X^00_8t&q5gep z{13pM&*Nih2gmK`7`_6qCycGOB`X~Lm7bwH(dpn}*0Xs+ox6MgG;?yrgA(1lj2&k= zI`^~i%p1m%K|4X{Viz{BUBSfslhfop*-S!hh&jZE0Lv%%AB^gJ6o z%|`a+=%Ggfjq6D^ah8pp%U>CmE-UPPIqmU$T^GFWL%V0}RJ8s6$wfNvZ+vGf(nl-Y z{r8enR^II}+jSss5w@=5iNUoIs{^X+M3Djqhi>kFxQTY*Zb5&BS3gzL!lMWqVGtU58od z1eo&*XBg>t?su>K6Eo2GV(laXOEm@%227?v%bYjxo)I@Niq_u227{&2_dxg%0e*HAE1YzY*17+v#=D42#%&KmV-Tx})hP=zCTi?JcvY*fFHMgY?z+P6KY@lKz4Wq`sVd%*;bQl_~jz*{Nlkc0@<)4Kq zE!#@o=!;%;V4qditgF>qA9@4NLeabKV4cN9ugYCd=EigTEqf0gyx46$wR(UM6eRxX z3BVp!mWG{ATI{9P2w7vjtE<>(E3?`f-M%}Xz%84HN1akSY4;~DjkTWchp}OP*12N0 zebBsKvX0E|KG8#HajmA!{LtNf*PTK)K+dSe`a+XA-)#GCY$kG*jT~RG8xLX9b(S4G%?@8?drvPrH}|R4vfC#vv)_-b zNOjy4L?cX#|5Jx!^yF~*@}z5Vp|0JCZjK~7UmyCiXU4U08K+13BHIO6oSb*lyl`+D z-#?4*Uqp7V635x-Vb-;gMOcjtNEB3P>|4z^jB#OI<1?kNTQsn*aCF}FXWXmj-TT?} zp*pwAZ0zVFetHE`06p=eY!c`>&N|nx(0HLVuX9GZeT{mXXMBc7D89IK&xMrD3W%UL zrxC+Ed&~*+XR|$Lmp$jF-6toR^Q&H@d^otAX47EMVV0gRSR8bPw_P#%v>_}VgX4^n z!FL?5K$+@*m|Fw;|LgY!`=2+}(Q5WM=qVas1s4ngxDvjU5T-fOQ>*t=4Jx+xm$-+k zL*rleEj^77e;FMhUH-Bw^Hpr{S!D35)YLcWna`6GU-iv@*|&HvK3nSQFLCyj+LDkn z-$narz2o7t3{2DZO(CknF;wXuEqC=j?4E>kz7tMAjt%fWpG8N$h>m`p82c3`j>zYM$k>Uvu~M_gDX-p^g5dvzzp7kw!-9PtvZ0K2M z=C`TQCm@i&`?Fa8x9Q2RG80d_#~#FoKhMlcbD9P9X(xCCNkyjs1X^Twuf}ez@%kFO zVo>X)w$Ou6|I^OFC&Aul;en^ILC)}DeBfzf=*z_DSMkwjp~0`>lb?6bRC#*x%%M_O zloeIkVW#y?W%dk!IhMIMB=>684*ZM?q^`lHLaHuWw9?VjS-y1PavQ4~84URTv^MnZ=w zSFqUTtMSHK=eF!ihpxo~_u`&of#0`X>wB)P1KZl3X>MINv1XY+uq+%u438qE#7jWr zscpv0?jB7OiH*JD7dmQh6{u%zJ@Ds}cGteLYh2v6 zt(~+k?B4Afq8FglnY>BYN{h}pJ3o1THGXV}9Q@fA~%JB$9rLYWNOSzif5%4&uGfxMVVWMvH#T?EfVq z_&?%1{Sw&Al;aP;9(OV|vrcGy7&Y`zEnV@_?_Ser-r$sy{5Q!F`{F(;!aB2UpXbY;IN=j+r0Sqjv8@hexwE6eAOpk`R>o0=?n{i?#~D6@t@`p?pH zy4ij6@_}J_)3Cg1U)^-B%0lJ|cP#DMmJdwJN5+-YZ-?I^kI!wP1T4w3_ z7k53gJMOtX>--*)Aj*YE7OFASYPAH$=a3m~GdN3)kCV(^d94EW*y-Tpzj}2#dbrb? zj+M5&c-ZM{2nBMChMGX|i@t$-flgwkZ#rZ6q0u${b!XT2@zmGR#Am_Sm$A%?p?T~~ z{@ZhtsCGX?C z=G%Dsn`jTr>mz@XjT#t2oxr`|TvZM$ zIXbP#v`~H(oBlF7bl=-~%Wi)b4E#18d)kEJ63t#*Te?~vDidG3S0I)Bk z1>kwBYj6U?AF(Fn31#ZB=A4^7`;&kFC%jVqGVNu?kZZU45-H4k{NJW5vdF zp`o|ZGga#yBglR14h}Q*PO0&48!@*TfOW{VWj?jUtAqU&u2?zkr%hJ0_eC^u>O8b@ zBS5S6Bp}%eED@CI)Rke4sP~}N$3iZ)vWu`bad^_`_LSM3mEKU5H%5|*`Xm1hJ$q;j zA;E&go`t4B5t1}2F$NLVBAe&*3T@qm&hAn-8b;dWjJUnxGA-kD)jW8abi`-}0edDu z%a}-1`l80~8LVx$m7$ukCxF0*kr~Uf?up=oI! 
z`Qgj)RT2WqZ=wy+d!D*Fdv_&@;zKD|(BOf#w$L6Zb4OlIu9@cd1yg5t%(L63g)Q^S zo^Acmvbs;iz~*Itdt#ac?8gYJ=~SVnPDKN01yOMK#$m-9J8D5Xh5`GB;fWVRYz>Fb zl_UG|9-9uOdj0g8WogH^edbs{v92DQ*G^wgZQbn}sl+(Jk)kxfX6j#FfhU+a&EB_o z39abrCsOSZR!H9w+H^M3)kAYY4P~8_>|Zo z;J(m?mzlxZH$)W0W69tzSsfoU`+o#7{tJBRPns=JjcG0QAA&u3D5G6vV!3iAr_Y{= z53QkO$#0v)^^(G>JqAB-T{t$+9XglJysH=Pl@qka_T>Y|+e6FZzIExqzItJpKmN9N z{g!96!PbYTNKLc*Yj=bVZSL|aw6Ivw+=|i-oeKS~PgAq4le?zH!;Yn`j-?Ij>aKHb z-?^-gl_T@gk#+T4w|x5b;7Yl(o3cGvLaGPT+jo36c6sTVU4md>UoE1&+ur>!KG8hA z$3Jnbox9df9Vp7hso%y%A6UYSdilZ^rH=erJoRmQ;H!B0 zX&?^S^V`(;H<_t&TPFv!4Tc{z2CHpi8tCcS-n!*N(_j{ZHESHP;diOUZ<7-Q3b%3W z2?o9nM~G>@i=+r6z6mBi^F$x=q{Wdd2I{R{fIUQM`O9|q&?G|>WHOM1qYV2|J~!F( zKmC9IsRs5RO7s3vG`>t_R&9KuV9_X0et|&9$;~ITS75Iu|H?Qc{EeZ?FPpn5lZT3J zJvolS9M@Q`bEL>Q0QS=f#zqV_(+^f;N|jjBFtj=C{sLn(&z#7yWQshad5*y{*AU1J zag$bv2yaeg zQ0bb^Hx1?MQ}lY(8SH)gxD{qY3!6B>A@L}-^_BQW^S%8go}PS5Cjbv&B?=2iyw=-O z0F5UxuU1ohh<)^Zw{>5vG3TiKn(sV4J%(_n(AfjedyeAX*zrHRQFmrUd?3 z^%nWQtUYwLbiQE{P;N?8nlnYZRIVXe;Os5-^%r}4O6;M6W<4J7U=nGBHt$Y`Yl;KR zB@-%T<`afR410y-NRJ?fps}agGf?InEYPJ&41L9I$-r2qaOi^6+_QmZ;Sg7~F-nV7@K%soq!Yh~_lAa$aIR5y1Vh znCO@;b{qRqh`37J3yfiuqGSUiW}%dB3EeU}2aYZ#F0$QQXLl?(BV~~MRM~s!wimgL zJ4N0=+a7)P2v|OSW#VU}x0@K5n2gQ~y8p!;$ku}z14a?5p!4nK>}8e-gcwnAwn#Mc z%t;&t=xi>b7q;D%uXoUAUty&MKcM?4!hbkz88 zPK+1&sK);n2;l#iuk=%}ms~~|eDb6BnZ26Xtp>HR(NR!Sp6B+SiMt&>>~2sLQ3qRCM2dFvd(`-DdbRHb2I(ec>9GXRd==|^Dl7vf?MeEotDC*wc)DdwPBx36 zn!ZqJTX%PtSi9N5irR>t{q(rz8{B1P3^(cN??kRe!)g&-XI6syOif_0+?_$fOcxpQ z=X>s0YX5{r7`k532Ho-|kTw?@yUHE16C&LwBLDQ2lU6`hG&MEaZXbk`w}Uiyp*8L< zx3URizmevjpa8gIam80fpqJb_c<2t*xuf*^6yXQziqmSJ-{z*dj>eoaN1)8*uM5V( zKLpF<52(rLcfS=HRGCe8CFVJfP0PMuIPK8YTl!@Wz5m1(QgTaV@4 ztSeUy{vq0eup+>L2dnS!+&XR_{+Y#EgSpZw_1SV)*VE*rap4Gdz5g`ZcbFZ%kg9R# z@p}P}?cBREFYiB$4K{{)kX04vtQ2${(+lX3BCD(S{GL*?jopt14Fvh7 z*(<6N$Kg%*= zhoQQ{s*B$OHrr`&?BBvWP^D6kFYq9<_n6?-$p&iPHcuB9~?5#jC6;baKa&LO)9o*2_72>83T<)_ zMG20YL3*OAbfu#gZl<)&OG&=>@DlxeeB-3S)LCok#`>e$#mySbY3VGp#0fHJYf=vO03e=N2Dm_8+#w`2IpDk zh?Kgz_*sH~qN9sL0ug(n2>U;KibtY~8)}Kt_DaFbG(3r;4y7eY?mvS4KL!{7MZWeQ z0QTaIf6CZrWva*6H0*M5&@&~+>sx5}?9A(k@N8E(hKh7O6~;bnE9>pU)#hH>w}?lP zh=0;dV``$xHBxBAZDR1Yf3{TDTWw6&w1vO+r`dvtWo@H?y#bXjX?>vu8;pM1!QsOy zoylTrCqy7p6?j3SbL?60xgw?E`KQFmjzrc(h)l$f5n1o?Mf&K-y|jMI*@-I#y|~2| zcB=lHx=6W$wx%S?B+?9`6QX}e&?3FN)EZz7vVJ+kQu}ZT+mSvZC1I-%u!mGbjn8h` z;7QOXkYkCI1$v7d2~5DK8&-R}D}C`2cUQhGfHioT9m|aft6Lgz{e5Mgo&syA$n2vt z_KwZ#9-C&e4t%6^-d(ArQEg>1-%CyU5VxY^3o6jnP5K722hzI&2hw1&+7lDc44tPA zw>12=-`L8r_jgBOFGiM(AioN~o%TqbqyKJrnq&jd18HzKcRsOqB{!t&o4G+4 zbJe*5g(fdm*BVc{+!Vb9MXd`JQ$%=WqDit)Z>zM*JL5WlKJkt}bti!QC20sUub99w>H>@P`oT`L^Cd z`%tlayx2R!QREscwht8RBDk!lw@(O!9i!_Fdf0COX1v5cUSu6EaSY~LQUntv_WnXM z$DsU#7_g>_>;liC72uWjsRHLjv1^E;z++cr^yq5#D(j!$sc83?nbSqK!2-t!2cKSK z>*vuawL%XFlJN{lEMgttD7N;?Vd}52k5oEFB~j0q!6~u?`p57zrvk%OzQ50ueDf6U?Jn1}VrpP_W z;wR*@Vg4)`7{L<%j*BC9k;RyYQED*~VrQ*Wt0K@Y2Uaf9BZ!NUvm zcXXYWI^D4>a%-FlNP#0!?3B%j6XS8>Jtg?FXA10ng^u15XQt4~X#_D`(hW~? z(^s>XOcZ3u5PJtDPL# z%Rl;~KWb>a4aRFTyE(`~rDlJXZBLFeoq=*8k9U!UZDF{?I$36!tTsxDP&>Ahq_0p7JQ!2x)&pK4hKFJQ_Bshj6WEx0k&%x=?@p6H_j;?O9Eu?o3AGm~3z#@g! 
zROIe2boCWDdI}uLV%P#ty3pNI=uUDJxq1qnDdG>(Azo1G?JsbpIC(Vuw555+=jo(Z zdm4U(+K_3HJ<4#@t1CK>P(IF`Uzi7nD_$F*OYzJpQ=tqw0BH%KzZiwJcNVq>r6tD^ zz@blyp0;q2EuL?evM*aJ2>_AGEu(wz_)_}pmp5qo7sowfBilfweTa5^t|2Tv9)Gk2 zVrhziiHa)kPa}S*vA4oDSneDya}Ad|`f&GRPkUeu#aH(Lds^=hDbr9*1jcRyM@u$X zp^L%Ol(mEloBdQC^9>#h64-6T{ZP@UTsPtd`nnM|;VD8CWt*H+l*!KL;e>oTNuVjs zF3mGjW)M3jQ6Av=4UI~pou!K`i*5p`P@AKqivpS!8M%U8>@B@3IJLlqCHEsURO97k zkORf^Kl_V6XP!M3{Ob|-+MxRn1o`~mPmT|F1Jw9Gq48CZ3Ucw2iyJrdKLC3?U}PlZ zZuXK4wK>v@K^|dmv1KgJI{qH)OI_(=XLqq9UF_;Fa*gEKhXFhi`XYhkNjUsM@_Ud8 z;;SR}9_($S8}<5bz`oo$Uh0_8z`j7By|>gpSfmyeKY6%;c?I?~)D0Bc#|rF|H*KSZ zjsYky>?{~kM`<~@ypIx6z}`BnUNcr~8{v13VynU@b06=PZym|EjdI9}wG4xx<+gE- zVnZL6v<%USuk1?{ynTRPoktMDpD!g*i)FOfI6}@XCuSc&g}@0EAVjdrLEOM=3LO#v z@lAPH$x0R2ue=9)@P}I>O?R?3PDLw3Abj!z*jLifXztIm3>TRui_HS|WflQ@mTswy zU}%&DOqe1t?;PdGb&eJCarAVW2Z}plm~UhbG_YSsS|LVw?o^?t|E8-y&oxSX##e?U zAXbA6D7Gi%esQQ!y4VC|LdZ2y<{0AzI9bwAl-@rdL%y(gLZ2vWkLQ?uq|`E6YNErH z_=bkbz7jhe!(a)>O?+nU;~>CdHI~@8u)}O*ytvGO^H;#TxHWZ!sm|qn1Q0O%c6mJb zI=r6fn|Q7O6*1|)E5jjyl)az7;tH$ChM01Yu(HsaVEO&3tGCq(1?B ztv>Y;?Ei2gfS-W9HjPbz{Ruw#fBdfm>?IQwqaCrUrfE~a9vxC@6rB{L8WA&bvsxz8 zC6?I=69?4VI1fBeAXP|em+Wk^+*prhIV!B<<>r1YARl{s@MVJr!`>AKJs`H+PCafs z9}dzwCIJNGzay3JOy;=~d5&)4S_zj;)Ua41BgM8cb`Mm{KL-1(Yk32MsVZwiYGHWg z+GPjOHZ-ye-hoh?gKSvRAuH>#pOBqTWGzE~lF4BBkD4d4vZ28Km^hn`L10ogQs*E? zo}-VWz%@|l9xQP8v0d;}4rhO!y*H1~b@Z2db_V2^8C%=uHZv-MZl27qCH%4yqT1@=I`-CJY_>0=UkID0uO z*1q_0(Q0e*Xkg!ozmU$$zJ!+!yNZSjyaTc@YLQGPLxo3eUik)QN=V5p!|a)*j`H57i5L_bup;iSBse2FA>|+=vczC;IUtxh z|4L#hQ?kq`4&S%Te(TsAtOMQJnCjHoAX8zW`wPs_DkEsicp0@$k!sqz2r1xExbNyc z6CfI2wgm4PvKl%hF>f`x1Jesol|)e_gi;x~2Ky{vUt3#C+vq<%*x$@4_+7By{{^sz z#+S0as~0dz>fqie!mPqPsqg(Z`^!jVCFSL)4 ztiuPe#h1B8<(cM^ao7hmum=p()(=!q+^~-DJ5lf>XXn_-B^+Al?E$gdFMvJfk%KG) zAtdZ$96&M)Ox7Vw!otrnP#>uv=8mzqDzo1`yzS|flD0cl>>0`jnXF?Z+(l+iY?M>w z8x=Z-!Cg>|gVY{yiAV+sJ4$kY@=H^c!KkZ z^eq#l7w^-0d35ADs80d&TtgB(xrWh92!~-CWRVlkK5(Vd2SRxT=Qn_7q5jDMDpXLO0G?BC1iMhw_e~cX5Yv8$Qn7cZel8J6Ex5 ziZ9EHc`G8kd|RKabC-k;M0;Aqz~A!Sx!}B>Sda*#Sf(0!4MWolS3={{)`$k;BiR4p z;QG&g(|;7$Lp!4-{}Aj^75bga0159?02G_`fe zLLfiVzK=9t$2YiZ23BiQ0-^>C%00qcTIgnJ_`XCZ` zCFNJe0sH{=lG%#~=-E%eo_eJQ_H4+Qy_YxJumdX9Cm{|C&}M=6wgK2a(g1Sqau=5< zndc1U+Wm$0NTEG0JCX}BSXMf~NokUq=7UFBk_3@tVS2;R4cibY2Y@P!roESDANqUC z9W+c~_#ah%YeWMh>{N2)9pl`u62~y4W|c8^*A|Ja?F-n?E;ZUEJ}7thl({8ikz%nS z4Np^J9<3IFv%l0y7Ssb(Szu0curmvz099b=!=SLn6lpXCETeP08-vG?1VS^5PJ(=l z2+uyQqOlY*BMj6_D3`c;!@y||9H5tnFfSli11|KH(zm(a_pNpx^yUhM(PTo7Pr|<4W9CNeug1^t~ zJ1b1R*O|S2QrL3t8ALG@jZi~8D`N%DaV_;E(*^JO6TyB`Z8FRDgn<2~(D;CToerr> znh?0yIUxzLYY4&?6bJ0t;<;oR*elLFeGT^3F~I(V%wE9$U1lG85B97dR>ix_UXpk| z9ZDeAF|CeCUcl2(OvY>;FI4e+wYi_6ZCcGfx_hjFeX#=jJRUDA>t~8r7O*&}Uyv)S z?2>Mxp*9(#`+%+il30b~vP)dXP=zXWu>}5>A{5sQmkmr=R2MWBy zH(X;mo@pt&Sa@6R0)aD&84^#u8ylHyaHu7wIV8Ea4Ine2u9n)nRA7G&^(W$96RTo^ zfPjdHJ%vYKN`sDJ&W!|J$Pcan5f4F|TzQq#Km!Qa_u=jDU67c11h9uUGjz*!bIz&T zG@?RvDX;K#_>PJjQ2eKaVk#O_cauLXHTPnn#TY}=&>WADcyuFPRrD+>YhvNQ{L4Rw z#{Zbv|Dj?3?+f+_S*+bsFtV{676r)vvmofoswyp1VlOd@1Qu*l4t2-PfITX_ zXrr;yI=Tq-B-Rg}22zTEVG|pH^Cs())1bnOfZ7h{osyf*Q{*Hmg01X>M?pef8W=>% zjd~i60iOfU^gOk4F)X@-vKO%DC#V6~i#Pu9$)yZbk%?bFAMbuZ=KlojH5KJ`{qhF~ zy^}Km`(OX+U6cJa*fVQ{Xs)HJzn+#}~kqgW?rl|dw*rp0h6CB_Q z)2^^65Y&YxzT{3Pvu!A6>5r_fq$FF1E)wA zVi&nn_PKoP4A+FX8#N+BxV!f*maxz49k0M2QWtYspXZMmg8;3G5|go|)0_8xydnv@Gg8sYPDX_-btTHQ1w7G>y!b zHo2I`O@)-V((0!w)rR<-B)gOFf~tYPr!YX)D|)PVdx(Tbg!N1TxN8m}&?B=C&TrMX zli6!vKPeolluc*Iy}r8$!ZL)bdH42MB;GPZya zFuVz=U>>*0F(aTu#6XdhG}YP5W}64SElM)z*dhKm*V9)R94PW;=#0Q&5H}~+%sdu7 z8rY+g zABIXj%n88gU{ei4VYDW%dI0>|Dj} zMK&)*V^AU)f&R*u;-z8**kX+1CB{jPBGYuSc?P@}_&4^KcXZdb2JHh&GIW6JiEQN^ 
z*rU<=FtgXf*^dIfKZGFQ_Xhh;tXlGHNs;V;{OVzp1G$OIAby@>^-j>keL$GHmG@@e- z>{)RF_LiAK>#RmGN>v`ZOM7z_^vhN$B)`<-G_Wtx#Wk=e&hFXGK5$57TC__1E3&5R z3WA3BFm0?^RVmOanZX;we zMaCh%a+!I$RJM98fw{giTqv$52b&dF53sM&#~JBv8ClXY`zO9M%@Ke-taF}irob`B zKjC&z<&;(nSXeikt%etZhHSh!zQLj(`*WrM3mH6rtR7^88rb7a^d9VijQ5%S|1#K% zfaQbC{#~E_>B-H%7wj9_g4DJp53?Y}pHM{5kd#=Y+A4))z(axk1d$iu$Fr^>!F#ZO z=I+b{Zo3k;&SF`ytrhaLh?&Zx^uF_%7B4U|(U#)akpZ*%JlF_fIvjr&+(yHDv9(#EHC>_CPDh=cY?IY-9{nn9N>k_D2Ht2w^1BdI$DwWatS#1be9zm;~%gO*6%& z*`I)YO-sPmzxW>PiFdWk{v+7`A>jHyyk`H8$?StPqhh<6@1U>>S1;6>)VwVcObgh1 z`m}?Mo19O|cD6B5vl7-Dx`f7O#I35?zX$swXnY&gCK0w&j(J;^&nZR?1{+K=vpcay zKzdS$AA!ASW3RG+eR(_1l?e^(1;Q2BD~b{9xu(z{!(^AZ2791O)dd9X2dM6SnAtx7 zH9^Y0a3lbGqIm*w_Azab4U8jX(* zpKT}CI)2k3+$!%NX%8uoqgSi*Y9K9pyM0E$UiNt@P13l3N748qt(aXVv!_!Lp+0w$ zdqEFWp?-+qro=K`Vw*v1L0qC;94-FPm|)3XMCROEsgGmet${t6Jqk^#Eg}oEPUKnm zT+%|dLz3LX+mq9~$H))!T@rk-TjqPmbEpda0QU4Y`#iJD?=$=NG`leUYQ`P-Il}Iy_R=oMtyF<9-;qa_2e-DvpW3;AnM~|nGOvj)eDsyX z)bb9J)+@TtA-tRfd>q_v?xRa;93Z^}YKkP3b!(>6p%Vj6F02Ckc!iPPiV47;&(#Vp z@4;STYRibW4=MtmoPMUrBALBp8wLhN$EsQbmVxyvTq#p|GrHG62i`Fv0UQ^@Srxvk3 zMSLUGHCJ`Iik%x`%$C@gsmeU9bM-9R>C=KIIzh zt%6|!WADIT@y0`%yiRJ~^df27k(~cviN33%!_)b8n>ahQdv?zuav71x*e67%Y!j(6 zOI0*U8rZXVRmLx3rfW&E=!ZlWfL)I+1lapV6peo=L%Id*#YRK}d+}P;GJ9qRTZiWr z*hAwFu%dxaKBdGv!xljan1hW%@?CF#o<}R3%b9-&_LaKG2bukb0(-oDqy_=> zDWt-eNS#F_5m>dV<{bt0lH3=AK8{&-Ex=yY?86B3xD-e$7~4%-JYbJ>K<*~QiJ2zJ zG4>fDPE?sdOIp<3(Mm0ZQ0O6sN{vjpjR5x4?4j{fn@3N)Ddg4EN4TMEvw*!Ii#$J~ zTo&G*QzbjQX9Q4bWS9EfpiC#! z2jj~-^mL^Tj&3=<^x2oX;u0_-98*Q;e52GOirhpeq_9N9njk>FokV{svsZ*cpLpnY z*vRa;Zp?oE3D}=VW`9J_*#6~(*m(6%)wI)N(~bO-aV(#XD*si70c>Bw8o)%$mSQibh(^UQ03v*MvWp zkW(%T0^>Ss5`Ur0?m5oE$=x$9EEO-&mx`Y8T^BEsd%5Xc)e)!yA_gVQe0L}YO!he8 zqr)ihz{rH9mm&Niq5FbVjoFVEg$gaRUNoy7t@s z=D}H-q*)j=Op9!lpC9zue<;vRfV|&}O3} zd+be3UP&X2dvd9&nPD?%uu(FJN=7Bw(g3EdIQBPfi<71mA@C$gV6S8OkS~?t9A!@2 zwsvV27yjifpjrcaDZsx|(Z0{@B{G(EBr0hQ>{UC=yIPpqg{nGS;gv0+@%PRcE{DEV zR0s~q$N129KSzWTM_J{5C%Ykhm#|w7ks{I{W4Q+V_nE!8u&6$JjmAgH&4#9d{UZKC zL$g&(o27Z(JXl~Fpre?cCQa7!F1{IO^T~Dh=DX5BHT$-z>q~Oa@&{0vAct8!uAowy zWF}CD8;Y@t<}ZhIROJefI4=DWS|5RcJ)K;t`&Y{LN~5P(TLgV{`q6WUem=f=$ffYk zy~Qv9DH2V0We(*1s-apW6ax05!iMc21-d?@$z1LcCkBwwBuUewF+zHV8ef6^0Y(d? 
zTbL@6kU+iUo2P|NHqj`77P6na&s$090d=#4cfx#waYg>oVh?)cI3A|;Ca-_)EoyFv zUaj@+y}#|B{n?)Z_V4X-|Nff&@2>9OoqG44#y`r*yQzUaH;?3=j&KLm!G4siH{Y%569l1(#s|(7*bi5NVQAd z@HykTc-;DiiJhgKj6lyhUF@FMV%~Q`3|d5_y-jp>l!P6F@4;RWPQaebzFUGR@Qi+0 z?J<29p^MsrkWj)a6}8!Axl)c2C4o|mR~x%;=|kEK#Pq@8U8k>_UQAa^6c&i#%}_n+ z;E7a50)lXM5&28dt)5W{=-i~yGSNVsNCo8kk35{0m?&efICb9uJIn zV2|NRqQKgNVn#7lq{RqRt|Mcx8Kl4I8~PCJSxN%-FuJ-f%qua~!R5&_xm4L?6Q4>N zN*$9BfzrJq>fj-=`CM0jj=Nv*-X%FG$}iR0BLpGqh3ppZ9a`@M>>c4%>9c1c-@_VD1AC>cdUwdC z4F$--KFn4l;f_Q+g7+M3?~(yyS^X}vhsGzf$1;yADcyID0U3hF zzYF%n7Q*9lwJF?KWC&rM-fJ11CC}{H*kx!hmC6cBKiVTgF}d%`=YU%vl74|6VNXRO zt>O;^UUJ91M0ofjbQ5bf7`^_PWvzAV8tj!b7;?LRZm?(SopRw;w zFF+p!V6Vvb>)ta}my+;Mdv3rOh4j;O-ixx4G}<&&(H48=O7(6WtIU3_-Vi5(BR!>M z99jdKi)FxwogAffAXfMR>{VJXuqVCjGYag7xqnZb;gJK;_6L_XF?tiQS2tU_)m5uE zhY&A{PEl~8*k&z2p_NPd>PQa^B_cxtk_xcr{$=(K?$}+}Lt>7o^@K>%2-;LhxWRjw z>m%o5r$bEMME;P+R59T~f|+~8v%1ZiSiH{c--Eqsy<6&N0@8qi?pZEkLE5 zXAa5)1)L#IU_5Y}D5T%Cz?*V36fqW5+y=u+TMxC3nI@%r~U~k487rVfb5~J3~ zsP*qDu0?@88;3-AY6sEw3-zATn1^9Sb%)nCzs9+ZGWE_b9#V4tZlW%3C@Vpo1K*A~ zT~!}d7$I4UBGXjiQ<8o{2XXTS>@7nx8rU=CR6j`*Y ze7#rEt9~ecr=UszO1A7c^>kLjrS{3-2;=QJzM@RzN1;uVlYtNPg zu$OLr#|R0%6d*7x`4H^a_jotF@v-Z{`W;hVWmF(ms_(&G(JhjMD8f=WON|1NefSr^ z{xf%XWe0Qa@i{=)e=T8_%wB9;#lHdOQd9ZSe0534P2MWY=`Hka0{GAOIM~I zTFDfposrTqH5g6ep+up=lk*H|V6S9)l7&m3J)1b36%`SLT5XZ1&hE^H48pMvt<>qG zOqCGzhj*;dKVBRdgV{ixCkdaF0OmBEszMZv!5kAxL?hON@@Lpsm|_9$_Q7yV!njtXcl^#1trmM}Bhy9|#n)!DJL7I!Kq zarzifkJjj-dAezBZ>nEbN@Wt3T5*Yia|Y~D%n8_cFRzif!zBJdsMs#aj1M{KCTULG0&d#M*Fu$M>ex))Z!UY4S$YR!XyJ>)iEpWYV-J-qRW$=Ee; zm;%lP?0Hg^+EIbMa=T-7Nx!cc)hS;>)j0P7*f(@IG_X(Y9s~B`s4rlz$SxuzvAemh zkjWBN7xn|NS6#4TGz8f9lQSu>UtyVzoLt_q24(V%0|rmo706}ed}yJO+Jy9UXuq?C zsRqt>qCZvmRAA4$S%+t+Z@mY5ap!&q_W$PJ{A-N?d#}d-2=;#)>XJ|Td$?PTBpr6Ph38E4K zC^3`jG_Y4+^F7#$s!3%C(y=9AuZ&g& z>}ia&39>@pQs|@vfJIOD`W{u<;5)FV>`U@3X6F`ma>?bSAS-1T#W?;L>_vFaj)cG- z=L2K7-r&JO53rET-W{u;DVV&>pGa?^$IwsX^yWUYXG z@Rz~9rtY5w>`!y^^M3s|;*GBcx5&(Oz#eWvz&@IWyU^Of0edCvx(0iZzQ4=tRja)` zG184D9KP_z(g$M5l(%+1bH>x_yJXLn!KHeASBbHkSPZa7SNWcx*1#V1xCZvh=BXdY zO2P{?FZ4EiYN7LF2zBUtun#P5V5|lbFnCzb#@4#Ms2`L=H&S9$4b0~#F&+JMqUK|SS#r9GaR#bC@Xcsg`9TiZEg!LNig_2Qn zOWZ(B8Eh#VI$Zc`(&3uiIjYf7-x9Ew;+E>}6)!&tSA~B6A%s%uJk@gfQ?RF%nK~KE zjHyMpEdd53a!(nY$VMZ${!3u5xI{&As&WQCdiaig0QSnQh~{23isR?N{=*{uhwUeS zAmI0h`8BoNM!;T%d;BP~SARu%{{z@}q{i^ZN9GNo^$zR>p;>xLi^GopBY~cfVOob9 zcyFg>FLt@;v#8mN>H6eS#TzRHT?MEjcbE06#4EzFX__P|mF{aXofHQVKdE}Qv}qaX zgAY+!re^;R?6KcqJYBD*yaVjTvqgdZyMmX1z2>i_PE6GJABe^~nVlVdy8$Ko^ZUX*@?)GIbn=O%eD5>W~EM2dI5e8>=eFU(_V4Dkz2_Qr_ zU{94@YrxiY^4eCTWrJ)ZRPLn#j~om(!a)kE%JyQ~(aR2oMNeEg=s}nR{OFF0aVV!X zUs(#u-iTsR9TH2uBQ~_mo~rauz`oAtvyIP7X1Kodl$qx3j1PszXIP1JXbT4{E{EJW z8OWl6Jp{foL6<@dibbUW_yOz#lZ)$DlGz`fNnH}3Fl{F7pZ?ijXkdRG#r{&C{V!PP z{XI4NlbpQVU;Xv3)o@!`EBs_o6lb7;J>d+48SwlQ_X(Qm-`Ur^PwoYf+(O6`8;TAe zrot^jt9Y;&Gv&>Jr;bQ^U8r*V@X{?4k3y2a1;}sM`fpg$pPGAaSbB4;!<09atWh+O zf}v8sq31-aqAvd*@cm+8o#s6No4y8nSjVY3FmLf-dRoyGoV1ZAwW# zig}+B56X>U08EWB7)V?f*xBJ*gy5mxif71$$3lj;~j#@iqTD$t=Ym0zW7X z>^(j21$r9Tv&BPmih~~1<~7)Bt#<%q4NqhGQOddVx>>jdf83Z#LO(DP@mmbgZ zc6kTM6=2^ll2!Y}JFu73P&30($^8!{cCF8ztwXb^Mo&=N?$E$Kvvd3o?E9}hdaof< zdU2%^2R%+s@02#555ZpKZ@8f|-ACvB5bPl=RfibWdX>6luaN426o5T__JDmwi&NZr ztv>>L&Q=+$sb(l46d?%3fnV19JFsVy`2g&*3@c#}4t_oi!eL0DDuFBAzG_)~=L4wl z9_@niV3o_OItis1N+lkTTT_hsNOXOlu6=6u06*^r*b|6|HH!)kzyf4+a@XF)QrZ@D z4fZPIk|AoQZprLTe%tuMJFrhm6ondJ^cmt#rp!jg2wd6lF>^tlD-G;5CnavWJQzZJ zQoHHm!ul21L!xV7uTAsfG5%);`(OW!^ogmF5&S~x#2LsSoM;lTue4(8pUmSs{5;qz zO+GSj9%v@rDX^C={13psXKhP?{Q~p;D(#FSknuU!x=O|0Dgpm&kz@V?u$NgB>Y~r& zI~VBMDtAp`u>{x;?VY>_dj##wel1ts=fvp}veAD+--T<0@kg9YfoC$`GnMaEQHTd8 
zMd8WinV@^poF>-mT#-gh(%HKf52KAL9Np3=$t}mG7h#$f@v$Vt(}p)xCeH}vRDXpQ z6^dNI+=HDTV4qxE0qjN0@?)@Pd8p(3;Nl7{+TAx(-x|PLlI`XvU@woXOc_x5y#y$# z@j*OprJ5i-7!I=dG8($>xf97F8A|J(TP7E0$9WI-H$^WgWpB-4UITliF&fw_W#zbB z7S}YturQDWQ?E(wUQfMKU?1ay)b=Fu3!y`lFydMd?8Hk^M8%@0xNg1s5!ge#`xn-s z@yYA~dj@iIxd3|+1i_1HTH<$LuP6tNlT<1ptq)kKC1g6E0(-~MEYYXbN3JxRPFWit z!Tt{o*MGj}UV*(Dh4hPHFV#s!9Z$+f&08=?nH;gMoj}08>!gzs<~p~)$`sb?Ygg6&Pp=-AHW`g zUZma_v`)Gu?ElX{|50YIO`reKe;?-b|L~eU`hKDDHLzD7NcoGt3Y=YOQ+X3k*6Li9 z<}hwb)L^uzM|#pUA3-&*MdOr&?2kaG*{vPam=ow`u^n0)eT!l-dW)FFY za}%CLss}+C`2Q%g*VJ$#ufy=mEv8h2qSe*}_PxoS6Rq{GQt#*Dh(|Q&3-O?kUK2*B ztVP@d-%b3}Mgd_etUBn3PE^gF;3Kpk>BY!lmXf5Z*$;3du4^|x275)viIztLd#&+T z^&&D4FvFB&1bSrl8rY*O$zF*+;YYCli1gYOc%RuTYEqt!_pNstjn4xk)k(nqDy!=m zt?BUoF4*I}A#!f|U>(Gd#}M>Wa_FjR@y_bxbP(Dv;Pq6A&1|Ay|NjV6kIk& zjU-nxCgn=2tnOfR#k0?eHa3Z<8yxD{`vtI9H2#NRui0H_!G{L+*O@&-`Hb(tUg?k+ zgag>i6jx?dC`wtQ)3ri_0{b5v^8bFYXX!~hxB`1k$E{`wli3RkP2my_54+s*7T1Sh zFVj%3!T#E0Q&Hv6x=h*8hIuN25X}Ucy$1HCum<*>i(7y_Y^4Hw6&7gO#Rti~0v+_- zeUzAGw`5a-EjLZkFD;5`Q>?)n4zKTXr`)rv06$<)nxr^(bw^}`8y$U`^^KNU{a_-Z z6)0tg$GR9#Lk0G>Yp_4iz`k5-k8}#yi!xoB(nZLuTK}*43>^BTm7lonI+UO%rV@ z;(4UGG;6aN@=XH-aa<0xm*Q{Wq-hm9z_jQ)7{J^fsea@8%o^?A7B#+cXo}{-GDEPy z6wbHDW&Wc*&0NK68Mf48gi+xU;kb!J-+}2Hm^72`o+}TnNRrwi{SO0&S74HLcD1nG zS?NwOo;larP0MV7m0lXA3i2nMYKk*qvinU(ulV*T;z2~VgzYS5YY#JyFxDt-w{>pr zi*4M_UcK2-W%e;p^`<$_`sCB4_)WhVMu9w1ww`=73Yrg6pVh-S$XxURPKcnPTmwGR zfs!{JvBj0Q=NG%@m&a!^K3KJcpIwM*2%A?Xw_K3)i>XfEBz^5QR&on6`$2p(Ve-qZ zQ{|2+WIjTCfs1NLj5Onlk&c@G3*`vnGE2&u0zU6~yL(Q=*~2~amSm;c6sO^YN9{ca zg%6-8DBl&gG|wq{Pxtk}Tp(XYPNC~DbrID8_Pkqs>y-3@Y9b#*C5*eAXOMxL1tw}; z((_6Hp%IBno+;g^nwgSvGN3-!%M|Booii}EhAMOPAbZCU;t`_9fy!lxPaA)%25wUQ zA&Qb_3aKoJ$%YEWG^8i=QPZx{%#$u^cGyPdL?uL&%XT9konov5;Q!@c{^h^%xKb=ySv3+dC{rCp5-?GXN+P96XncIH$1koO^gt;-h1sbB$;{i8 zS@k2t*=jV4sElQrsH_n+Pehw4jylIOj54meJzUddkFV}$}uP7=&}z(Zw!=2^+lcJBJvR3OdKdi~YJ_dfkukgNcX)D_rdqZs?qkeNr6sn0iOF zCj|>;7Aunm`o=Yxk!E@(4FXK;6stb5QBvk?O3yC#`*Z6;Dd~sy!!AQprC@T zy6u&zPE}nk_1~YV+5b)+|GO*w7i;#X$7i`W^M3tTe=XrJBX*QZ;NI zmckUN|KO6L5tf;?QiHGD*$n{a+q;>LAk>qin{mYz-s>spLcuXE-3!KyAp6Iix73fR z)1VA=Qga4tOs>e*9#2Ep)@HTdRAF?L+q!s%vVceqRY%HT3IB=-_cqR14nB_0V#qI! 
zkDy(tZ-xQ^uD65^<*kgkUFH7apa&zvWjs5-#I}ez&It8rX3wkHMWS4l%w8H*rGY{!=nxgwRIN2asZTQdD*<~fao`d;H$IN1S?14* zgq61YA(;iDhDK{Wfl>~DIzw5B=N&wu&=1`u=0J_X#w}X&G_nWZUl6W zV&23p{F*`(aU~WaYS5o5C5jOc({#k)WJm^Gn4=}_F6-Dl!?k!U6{B?yu73pkKQu)D z`@gR?4;10%kHKCw--%_ItV#jzy#@T8T_Su*|}M zc@YmX^^TG*pblpuXmm-=asscWmDApXy=7#fs*TK$n!QlvY8;%_y`{$OpxVaYldMU3 z4@i3gTY(S56|vEkp|lo}2~%b7JI;!8@bU)WT4jrrX-)a!v8LvvXlg_*2&RNCBuc3~ zXOGaXq~>C7Hihdp{w^4RmK#HWePCmIn6B@+MZlhs=#@;iWw+5X4e3XaDQ-e@s)k0b z$yu9jDi$DAfu$p?j~$XZXPl3DLPp$1H@0zABG>1fp=U%%jZ2{Tt4iOdAex;S#P6D( zlwqQDVJOX$h|^7ROj$IrhiZBc_CT-dD*9&w`;(Ki9H!p=)vwinJ#L^V^vLYlRmD`N ztB3Y$M)?Bv+&_6N#Dzd6C8z@!kV!#eJ7A|*O-4AEV1?*r3So-pxYiJ_e(nC!7U|zS z!c@mPGF79qmUeiGO`S6FS0)u^q`I#(rIIqrGc8-cR>9>7Mz*S1D{~_$kkp&HAKRla z!$|7f^IMScj19K%Gzy=r7w5%l>3Bk(sJuE_W<;G5#wkru@WmE6a}|rUY4zw z%w9@#K$JO6=vVwk57@)ZXsvhT80JNPWx263JiGu^yeGX`B-4` z&h(F3M0+BKY9p3#MC1j;+TwYKsfaKEw(fFUXJv=kH?uN#o*g^L-f9m3_W9-*q5*;} zmDy{_gcN-w1ZH@y_##N~B(e`>jv@Usw&6TePl2(ksMTQ_p4p^vQJ4~C)o6 zSmqWbUa<~WIfklDv8TQy>rSch7XW*Bf*Q{-I~Ylxc(sZ~kFjquyIHzk72hVVYLs{t ze<@btjxpYfO%im3VJ3kQQ=P?a9P~tdV@l=f(|JZo?u8uICfzADlF}g4&pk{Zpt7?R zvoBT+i>1Ppu59twHrrw=GS5D9aB<%eAzdzLC%2I@8HlURc6)zFM8cRK@=*a}K|3ki zliV}q#TY5;2*RbiM$~XNnbIvTZy$oaM&oOlJz!ti>cS_R>=_o2FDpItVgw}V!H>Y+ zl>zKU(4b~{;74GJ_U1Cq>u9Hc_u;@P1xVd>!Gy?bxSNrJY*6!s7Y^0 z4qJnA>>*x*aU!#iEUqq}U$HRIJqU3xU1Hor3W=&y<@^ja{`<_n$`S`e^Jz;pVoRWA zysCljs)=9P{I2I&M|^JJnVzeQv}$ zOmWu0UJf=d3KI@yQVlVui`6b3IOcHaD+B2TVwS|_DKqp?J{B950D}C3n8Vg9*8%GS z6JsUiVj_u9D^SX5G9(G_2xxrjRTk_@0@ZqJ=i=t#Rd)O^d%GhDIm9C&&RNt`=(E6Z z1HGqqM=W&ZW<(cFBq1>b0i_zXX_RDz>s&~z+2J;i%p&?_b756To61M9{|NT~7_cX^ z*VszH9zP7go&-zzSE4zcF0##O8aFa#QByNf&D{fsuVwZ!i&9O81EKL@E$iqcv&TVi z{9M4Eg;R=IHTj(K-cUW|LfG_?kLPsa**b&BHaeUMvS1?ZMFI z4)5leU4-PQHAZAehe3!3jhIy5SFGC<*QjcXN-(Z_Hq2?1@)YbCaZqbOVyaa^D3$M36{pl^Re2*hB9-(N82bu=h>BeGm3(?!a~H z-Jkr~pZ>Fe{joOB{#Spksqt|p-7=;vpa?$~IZ3{umj5L<}9fw_n36mwIg zp#=#&bM0LHm5y|!qgQMtjOn}%SlrQS&%&oonY^Y%byMhxA(UF&nLf>SF6=#W^&kr< z)Fq1a-6ckNl^$YSewOMP;he6t_2H`wAtrto(v>Iitn{rB9UHrAEs0yE@FQy{vQO}` zdv)t}yY-gQTWjuO1s3X4xgFhtK!!ALMLftr!y0?4+LiiA%xG;JzL*3(iZWr~v?Q{U z&Wa6=vZm4Y2(j5x7g?*-$)e>8a_MxzyRLZe$u-#1dPnxGbiAT$*Pc{DSItBkq%XB4 zsl#btFKReweCz?F+BXbOKom@Z!X#jb)0Gbtu==ovRB$;~R&j*{M~FJmCYucQzf%kraH(?N*oIn*4c78U=0HV(~yEd2(hWwy`sQgP`m=d*_0`13zFHh zQiN2SUaDv^mbCdLMG@H-Mzdl#rWOLtEboGBs5KPFlG~|4?-1gg=9(3WUYy@jCjvMZ^&QKFHQ5k*oUwxDchyVd5T+oNQ=B< zZ#A%2G(Nrd(((Yi#!AFg2eTandaBQU{vFtN!v!kMfto3xhIy*)Xl-!0XdbEPN#$Nm zha)HX3D`4E5a-}plb=Q~Xnd4LGS>Fm@BmeibQqO5dy3rMMV@4yBaZN^#50D1TNr)@ z*%=v`CbPJ1+k&BuJ@nVE*`>R-kmN9ktBf=nphiN(E%A+#8I3Aw1zV-yz1AtFxq@n7 z53NgPUvKqWrxpMUz@AA+hzF&dN=X!rGg*^oQmlO0-yBT%>>*kb&A0$OZ;r9LvSC$V z&(MKVeL&0Xsrl~Fs{P~4{+|Wx;l2R-oZS3h{rYc&cGk>Fq~we*l3{ZM)toRIr?_=?AxJ}?*@-v3?IE3KYcYR$BU`c7vm?-Cr`egI{0>akK@Jk?(5N= zrvCM|^rEqMrfY6>`0&g%z4%?Cw|R8Ad1mMJ7p2&VTaQSBFRNud0NKN-F?GA08dM4H|hySK~@%#R5 z{`srX;}?Uw&-=Dt4IVU&95;_2w@#imPo2J*IOaTGPMp3NJNuRon!IeDzI-vb{d#cy zO?v)SY{WbHwtHn~>^S?ftEatxPB*^UJa)iG|90^3+o7ZH2KHZ!?7tq~Z64cepE_up z+;5)PZJs&$Zv5!Wk)x*r$6ts6`8wpjXik=3OPt~N7!p{nv@(DaWCx;f~*krJIMf&=e~$My2Fo zPV$MYLud(MCU^wV>$dx9TkRN75VtZctKJx*882TK&(U}15k*13*n8LtJGvl>KQjItdjd%&i-)QQ zuaxcyYY$4-A{B14$+GXTBnW+qj09wZ1Y7x*RGz+*E(ySXUG3M$=h7njF|+@O_8(q> zUl!;I*i#7w?8)pAni+crFpK1YEYwAaWphl(b&L!i$x$+5(4nBzCh z-BL(0^+7S_=zH^x{W-0%;?}@pPeLN&%dB}|y!?#;P-MWgDpZSRS=N*}%^Kl-;KTXJ zfz&j|z&j7*8v1f{nF0eLJBseEl6G&6-X)nmMsIJo0DBR*=%b&u$3E>yK>LYU3iivK zE;J@dkT{?%NkZU3HSZd-4t|mY6nDhR-ngLgyOzYMI&Tog!IQ5*yIYUhQQ1k~ubrTJE2D$In9)G@^uryG9YL}mvEmN%;n zbQpyw&Z<$i9O8tdH6^h(QUxw@M2kI%JVy-mG_Wn~3l=v9+za}uOc~l7ZreILsnV+E 
zWU%BswD>Ajom6ma%CujJiljAB$Y4Bi#o!ioxqWJ$3k%q@ z_bM7+%BI3$sK_9Poed@uaS{SAJIR%(EM;~YU3^Rm(8yZoeRC9*x>mDa0S;)@Rsub( zX3x&{r@t?aFRS54Q}49yy_2)k8+p0^`LBNsC&SIdyq$&$aX`GdBD3#E^nK=weU+N} zvVY}CX8lpm#-rr=gXr4_UCYlBt53UE9>nJFcFo+0&OYv5d7N6i-?Mx>HFGyT_b9#a zI5zjtH}p+lNSB(9%xw%EoLVPlpZH^+caJ_x&V1Ih{8j(vf?_CGNTh z?}ule#FpiVEOI=JEI*AdJ&W=CJb(3|YnC^8np%4lU%eAqy4|(-S$gAfY~g8m{LAq0 zo9^+@!X^!=-RlRBtidn*na{dLpGFtHNN+w)Y&?jrK90&eKZ`7U9-sTNd+zi2%#-N! z)8ryw_HOrjV{H9idiP;w?{UxSXWiqEebM;aHNal%j=8p)sm>+ZaRGZd&T;aR7}+yC z^VAdjEI#%uz4$P-!d3mOXXo?O&Xf4sf@g%+SD7DP{Kgn!7%WOXE zUU`;!`*nKx>%`P|iLvnNL2CODGr{i@Bj0pSewmv8ym#es-^RnRZOwQq)2zaCusa&Y6bfemiu!}Rv!-u-*Yt*5=)PZJBjO-_9q?u{&O4@zKnD`C+3E>FZ$Nco`;7&kBmIY%suE^eAK)8xNnVr^&qvx@g%+cW$)@& z8IJWY`?fyo-{yRtr8b`?)}JNTpLVbEh7S@GpT~v*i<=1axT=7?C{jPt_#e^!XK8$; z#{c6#ey_%-K8YG%XiLVjZd_&^gX3TL;ym!5B^MqgS0D6jJnTzqE1B8U7+Z_GM=AY3ePj@2lZ`7R8e^d&0_BeQVziZTxm{{mb6BUuU-1 zHXoZ{VMhL`}pKnzC>_tIlZ$-4)AqX|5uUeuTrv#c`@%JJ4ted z7k`#kn@mPdjDx>@nBx0xKI+|gkbL_nKKCR#$!7RnWH`RGPX!H3BD2?uXflTC>PKKd zj~ah)rnbddZ;sS3p~INeGzTC(X+3=hmChlETM<9OWm?G=Vx;)0vtOhPFb&uqly?Lf z$2D*$YJ4sQ?(3cOT691Odp`^tM4z6+Mlv% z*CjD9Al3=4ZbRey7M8JIrDdt8$zMW9gv834DQAo-4H1MR#(u~}J_RO55Sk=}c{W53 zP#vg}?fn(j6x<|(d8}hIN{zowxk2b0gEa0-K&D21Q2yY10rM5k6m@F;N*Gx@cgd_w z5<`e#gALID;;Jwv;@+vnw^VhZ(rGMGX3tuI@}CRrsUqLZ&-?%U`d1)5Ij9EqguvQ3 z$QMCG+O~RUqSn`kPL0#8^wQSZ5Aj}YAA%vzBLZy=ebR>gaz8{9ylxI%aQ0M*hiE97 z|Jq;O>~Beo_idkYjVzP%m3n)z&Xdy~AU>^N>?6jCOkwekK|X)d=mkZffi`k~-?CaT+_N8;<>IH{k$*b23+3)$> z#cccZsbzLw*Ei)FpG&Rn#a8z}5B0Z9?6l3Fw=7;Y&78H&pS_vhdp@@Da(uUA@!YtU zZC^QWUpX^uW#25EeLH^CtexA~>Fe1u)Ap5NW!E@0ZyuTmPAtdfc1(kB4U^mY)$=zi zr>_>znienL%$>iOI($BPz_-&aT^X0Nrngz+io6LY|7Q9^NTbn{mt)7A?VH)X_KDTR z)-gMdOcppik~sAv*nb53e;C+{3zKrZ0PN9t2B#KU(qp#8qmKErrui#Y%8U8zi<#`p zvFxkC3+sH=ypT04UA8ZszF9bVJ%9RU{)%Vv^=$V0iOZK$mu>Tx*2S~dfw{=?CXu~w zdZ9TpVw&G?UpQ-Jwes}x4%^wb<*QzE^rQd)emzM)DsnXWx&X$ZO`aUytm+nmAww=os5F z4!!N$JZ0%^pUDKOpMX7(0SzpD_9s`?q3OCd$1Qubo@%-=nWIbI)Pr_oRnDysaNc?6u*`X?b^_>)?Pc&5=~iN>6IG?Iwh z#z1k4S0)OYd*EFVRd8;YyL{5t3DVbkshh+>K%lJ}5wv9p`cq1lD8#t}Xv1yM2hJ`` ze3#EfphrR|$vDif;Qi(A-Fo*6nf>AE31I)9e)U(BmX%ov-4w#%Z)dM)w|LPRLqA__ z@5(cE{YDpsv(=313Uo<%ARzTw1-1-O=S; z*bl(I!5*XxQ|?H@=b*iUQm*TqsP<9BO5}6|%boFCU6}&2@21XK;*3>zB&w2j7l$|x zS6T#{T8sF|_N9s4N$tv%3wQKx0uRCuvb;5O5 z+0*4V9;qO4Wtnr3xQBK@=_R#s5z=LtJGL3ab91Yg=Q|gox`)FD+Clw&CJs=uzdSiU zJ%S2@#L& zb+Z^UC@Si_13b_-+C62iWPRsIeQ1g{fhx~bm1nFmJW=jJ>)@%@TRNB4XU?*{Yo}k( zliK0U)%){oF?uq~+}M_-D%}0GfsrZ}uy_VUDL{zVgd1JRAnFamHA1&)*I-Qmjbymq z7Gk*M@@e(}UgYEyI_e|Xf5iHq(D=glXeOXn*+2TDKWezu2wSPSlJO_fdUugsInB1k zdTtw>j{=!`uWSWmmgRx@n&8_?>vWBIyv{gK*^wx04YF9uT&XI*Ojpx7V(!J>;;6&w zyW>ui*`v;b#t+Oc@orSXko#5p$18l39`AkQDT6pM2BVSFMW{G)KsV zSf|xA!Wgdmm*w8?2~3tEmj{G`gUZKy!pHFy#ngu7Cd%nzNam08;N5i_=ZT1$yY7h+~E4FY`V-?gX8T@wU%Fg&*dW3R{e%xl@E*(1=G zI$W+HS&7S@2S5Ko%3!SRDpP>wy~x7cM)qp&>WU^YSTBA8_6L`$W`7Bd&un`EdrNAd zs>MuvP-gE&Jtu+@V=o3+6{cP|iQLvOmyQ82H=6u3o&nh9ct>zG^x^uz2unKmm8;-| zmFbhcYQ!xG0bpNmjWV*R&=950O{u;rI8o(i>|-iV7p!34u|?^n;cvwt!ZA?kooIhc_5Q(?xLVY^w1a@K{I$0 z8M8T#@=>EqVEM%S7?rZAM@8SF^vvmS7qyr>`o>mO&7R7LC|5sb_8-yy-4)P`Mt&CT z1)dbxOU<6vy9-K_nMR$Rw%Za@gq4E2C70fM$7Hd7xUyrQMwe+YXRysFXoG!>V7*0= zHOG*|e!DI(^+_{ip+EnPi?ob+EoikuvrCv{V`vW;S9zImlewWwRC-`NA&TL6Bb82i zf?{F}#l}LXnR2F*!RMylI!AwLduLIzi-|AY>pNp7XQAb-I=!1otk{gA^hVA?+(5J~ z&05wtE;rS#ZW_KZ)GF$biDI-Ie`<{8SW|V;X;|pryl~`xr>}Ue3opH8hs1pBa}R#! 
z@jBZ8Su9wd(;g*-MQx1$Fux^CjUUxIz|TQa3++s;ot=%ht#L^^Zt-hdjmrV4tZA4ODm&Rl#m({F?B{y~Og}$Xcy?uGTr*=$igKy!cII>N9WBIkVh< za_XL4cyc#ao5~^-{2Ep?^l5o#QW?;mbq>(V z#k2(?149&bK(3iz$ke-EAs_O=0vhqZ^e8D@0xN?e`R14V_F_T5mVTD zK1o~~r?22x})mMaP z6a5~x9)J_DzY|O~Is>7teR!VW`awm93!x2(X1!lb9{!eEfOVkS$N1VF;#|vGHgc79 z?PnW9h}sh5_mYEp#!<}X8j0fLHD+&JM@M3LeoGXpm-{E8#Y4TKfjvl%?;9))ksE#X zWcGl4VY{nXSwxA>R!=KIFXEBHwg@ZvL2%?jXt*3tKtpGxH&Ycr?GnAA3zAh61TZP9 zULR~|_n@37geM_D?g7}>Vk=}u3e=5=y(TzT<)@=BUT6$IjE+Bujx>^{nBo*iD)GTq ze2F+An))kEsX9aFJ-xek^Kj-O+p~2jBPf_RWlg}`ux(Lx$!YTx(3EKC%4-ka3Jl!s z8fplp|Hm6knIj>_1~dra>ERsayA;_Y1>`!d`oR1O-M5oR*{?m_cbu^@M+Eh3sXLWp z=q$A*8Uh3P`p%neKECD`-80W(lSC5u=MVwV9l(A>-Gt^J{1}QYfx>2!zHbCcFa;*S z{v(b55$Jy=tykNcX07u+vsalux-!*jL_6_S)|l)Uc-DCs&O`zACb~H_3#V8&SgGr~ zWgK{(So&@ET!Sk`M$WSe5%sA~dU3&W=tH!zaLf0sK_;=nFLur?*S4C0=X&2Ddxg|x zt-W=@@fuI2oT{y{>uz}PPH5;>Xov@!Ko)XaO3sE}hCWnT(ol4;0swr^*8arVMR;Y0 zStNuGGzP;e^KrFayh@Wr#+dZ|>ch_x6JI8$ABBh61UM7o<{V4nZ*}1kF9UWnM9d{` zY~`;y0DCR7zhekgw|6zz2H>mNWT4@2XR!;^K+ULMi@m}ftFDhUi7(KRi$k`^Z{%gxqKco8TT zQm<<7c)mXMo8~}{A@sMc_P=en{cW2Ii!<04*x%p0?#XHHD{SelXirtOg_-x^o?9W$ z^v*BcF}f;R>`c+EWDle(+tLlULC01>bG%H~Q_>pFd+oXL+yOIRXYVU&kKb&Hwmx^qOI|thp1X>giJ21l9o@VYiDESk#x1Vg*S5Sj_8YHV zq;1@28Tf0Wr=ahqshhO6#*?lv2f68v(V3+)6-B8r&05W#2mAM~+5a5a^S&C`|Mh?Q zHSsSi0;GqZA-UhpULrKp_w+UxY{fcvsa1kpDcfV1ppQ|#My>rMGVb2a# zc)mw#f+MB=z8j={w#47Q@#MVr6u-1TwuKXGyF|hIk*V?y>!+_wKx4VP2ax`6O&)Ql zfx|a+JxESBFC91zv)@g=t?TSTX<2BGLs9{#6m@9vr#e#6?x*y^AT=?nTv)9zdOm4( z<{HBlo^(Tam>A+#=kVRg#I4RzXlK*frT;ic>AOsCxBjx5ddU^uivtE)p?e6?q(Z817K2~ zz6+3(z6+%Ckka;8wb@bE(Vloavza|R$e!;WOL<K$ChwTN zWkz3xV#4cuqgD3)Tdt8??xEYB!KabQ=ux&82gBJz?0Y}$@RxZ~rS4>bCBh~`cm7>x z62s2h25--{bnGx?IsbcI!5c@FHNmD+EHn!the^wwZ`vtxO&!hMi0+pE8Bz)_zl1WK#CYD;($A=C+1v2!Ig37`rN5=tbh?dK(G= zcAV|g^P@9ye)^c%f5iLuSKxhS|Fd8(v@>w8=6fN~Go=Prh#FhS2ROm8t$bqYT`t^AE$NH`=35JrQVpFv&GKQTn2- zu*nW*Mp~Rhn~5<6RgMS?%KcetDtM9&on-Y(XRO4(dlSslC8dW}fjymax15<}7$uvG~4#M!U^l0w2ub)Rxv#yOS+b`V&>}-|6L1a=zl*8Qp z4Zc3i_-^SPiS^B~^9xd}+j>7>k6H~gTKZ;l+v0F$Fbas&?)rwU%a=(k1!s=X;(>n@ z)S>a%)@a+oA`CZf0I9ys;vJhI@!P%>;kMXSi_%+Yd=2cWHxmaeU1jZKGYGsY4L(j) zJbBcx5_kfsf*ZRj)!p+C)j7H=%@Nv$*=C7gK5YqeR(Ha4bRi<}DtcvaXtR%~cH+dw z9z$s#`g(48`)jb-wx=t7!wsPsd{il>aGv)P6Srf775?rLSDYIpLVe>%e#cO`ajMER zUfI@D`pQRlfOlaHu<$J`-nV$+ZZXlrppZBzUng36Xw*+$1j}^E$Dz^BA`^GqeGm!c z_oxL4P^bt$jLcQpdm&&-TRikD*+-_a0B4^4Z-ePumgpUO=ALJ$fjGk`WszId9N8rG z-Uop+n$3FwtY{L<7gkf*#9kIAJ0D*{XA%cnVlBdOigGW7b{a#CNiIiWv%j#}^LcFaUZ59d9#RNF96WOl zER!pFFELr=Na4m)*Y5q=8)pg=_s=u8_%s;B4Xwx+V3I6s!KbYOSaD+5TxJ8?yAk0( zN={UH5=E9!u_H>&Oy~snC^U$4YMOdro4;^{M-EPSH+nPhOei%53+arv$4N1N)8qns z7+Xh7jR(SK-IJuVFxaGzG=JYTM4`N}%V2p_`)R?Cl^fi*9KO(|C^*u`m!Aa_HO2rP zkkv>KaKhIos#rhXLA>g2zP2@b68D1Xhh6v|@ey$(7@WE4qwVFF3QMR$Z*0)DbT3VA zT<)XD+dDcuq2Wg*qlYqJNDQUV&Q3`1>1p(jOy0NoO7xz5I#t|U0b%cX=3vyT%*p$K zVaM`Wij8Ucq{UAtFRZo2HcDCkt-F{zhEc>?fU zQ?T3~$bJ6GJF~EGo}D_#Hnup5URhw{`3iSp7zxCPZSQxDG77KO+3j4p=%4?qt`h4GmWW$LUkY-J#SlSk{vK$~TqV$@ZUT+s8N9rbr2HMMkC0h{D_= zop1R_i*%kR?#_|Z^Qnugu7&0Lb}Q;FN?q{e(y2vzJ)^6Qh|-fJ;;g&QW5E8j+SN;i zk*t*d_#%C8VMi+WbtnGnIK4cyM@EmX#!oMx=@}@Mz#b+QS3bsu)#{>veTgAW85AiD zLUax69|gv|8`+-AZ0qDMuvFL{Dl~S{pbG;5k>uLa?C!4UWfe?={p^OY=8~UKKkHq zn>@@U8#=l2&94ymFp=vDDd{n`d8+F4lzyAu^@YuVbj# zFi~ch2JCNHhN~%t>iob2k}T`wIVf#y3IvyVNuI4(tNvp+t}&(Hnqzy6g5 z_QErtBPBZ{A6>t^bPta`bp}gY?O-ZA5;#%R9<4NW-*xqsw}lE{yY9G>h4faMaUz8I zVXO-UPgR3wLQ+|fNFjd%`o+j8dB%d+n31p)b73I>aMbM^nYt zmFmA^4;8mKL3eJOC~lm@m3?&%n%doH#Hua+Dl7ha?h1SORuCVn7|jU0ZI!F1*x)H^ zYrAJ^>v=o5cCmAqJ>EY)!l8(rMwu262%nr^kdz*QRoj;W_V=vboF)qv{X`xtY3V#M zbmI{O$kkc9E%Qg#g~QJip 
zk{yR3^d_+mBoUFqA(abLgl(dAa$7gM|6TtI88fv&z`oiwS?!vx#NgK0pZ_{Yd+uYs zcWCDrW^!Qnl(xXUSC&s+I*>$EI#Yzriu0MM;5DH%%qCrfFv!y zx-@r0wa|vwD^lGuF^ImV9$<&g9rU1Jf9yk`;U1456SFhM&diL zro7{s>x8?;)ax_--ntbL_P4v1_D`q%a?gQw%#^M1F{|F}Kz_6I78RLL`2BT8sN{fed znX`G;28U`xI0Fver19OFA%jO^TxW1*j?Ssl(iva;##m_#5Q31jK&uFcYV5n&io9U# z^URv>I2&Xx>(Xw0*U;a!g`s})+eh+SM?ZPdTkxi@(KJ%m5q)5bMmD5M3)tT>IxE|q zHI^kwR<}DWqtjg2 z=;|i+YtYPiDauSvT+8s&wWY{2tL@Rx62sn|a}pTa=GFZu#*1k&U0}4XJ#&L=Yfl1g zRo2iQn;#wGKaW7~7r_1mu+Pi+>tFwsjKKl#X`nnmX7)3;6y7E_vj9{#m;>|$mKmcc zw$LLqSksR^gZ2F07OXdh3tn4ssJP?I+(ifO2A;#E_O3dAiqZuoADY*l!_%V&$Mf0C z@WOm+-$1RwS<>dLHizJIAfq4&YUsYBwUK&~AzsuTEba&pbb~AT9ihCA2zD!oSqeLt zp_h1QjhWM9<9kQg(*1o0MA_T>xY-Jtve>YYV@2I|C-1o9SX|W_ z+!Z<-d~&tLd&?UG=koP|8*S*@&bXCxG ze{lqpkA#)XzH4fpijpG?37MQ+uV#fC?fT@c9uILgo!n_I0~2BejJ*;`9^!K+3A{`#r{m0 z>afdvRO5fd`X8>q-$UbT;3RvSgo)YB^K58x;l9};0v)}-+>4Ya^C{g^j^SF{DC)e* zm%+QbC`m<8lbs!mB3iz=>o*<#B3m5PrvsVHq0ZX*BGET?a!zDSW?$1{qJl<{4O@#N z4bmli@)5YPeDYd7q#p(CK0*X|a%!DOWH5}XaZc8|Cu^B#)e(ip|H9vs-Z&ULI*YFC zKJ&)m?S$4sLx+J?dlz(f6=-K9f$@V?<$51WCg+WtcmhWAQ)B0+=161rRAbi|^S&$E z98X-q)Ydk5Ie2*S*wIz=+)W=^wT1YjFSjLz$7nrby;k;ZZ+Uws&ND?VUbfsC2ZXaI z3enTldVB8p6O~3sd5iv;!xNcXRAA3=fpZZ+$gcJyum>r|k50`p`ovzQ^As@Gl+eM` z2fWd5C8}F%kO3SV_3_Kq|+`*NLw zMldu|=&u2L>2Sk;*U(?%97hn>ws_LAjGB89Jrl-97~z#$W=iza`OTw%ePwH=s=14P zrP!9p#oY7rFsbFO_F}Y0x=_9$jQ$A|TyPq~7`6i&H`i{qd18kogn~=054F|^0ySjj z)JOOyRG*7m%*L@PnAFtXA@Vwe8j_GuUm%QgWT<%71^No~zT1JMZecTYoNb-oZwN|7 z^}n@*KhY=9jp0HD*u!5UgS+Rr?z8{h%>JK6peHo`@nK%>&0qiOKWoOk3hYl2jq^Y7 zHvMlmZ2jZUoLx*@E^GA`ym6H`2g=|0ZyTcZ1f<3={z^D9*0F3_=(Y->aizeY-)b+> zyKtVi~)57OU~K#jTb>4lQ*2cYW(kho@KP4lhXZs4lmqhVVsVbb8?%Yk9jH6NY#g-hN1eq}Y4RekD`@x7USH>o)R-Jk z+%7|JVr*x5=W?I1Hz#MulnDjwPwBg30xQOE$80@lF}S-u%j@4qdn9%?2O+DAIy%94 zG{E&l@eo`F;=U;DY9Q_{U-ChGu%WZ3(iN$6bUh6B6}LJHp6gM`ng*u&_s>R7E*ZJ_ zO=JK&3oNLhA|6DTNhsBNDk*o6E}FV9;3R;^YxW}Hp^U^7ZTyMz+EO4rR#PtutEC;jDH?9>#{z1jwub zTQ8#7T7U1~HCcc2y8X*gqaS5hVI+CSVLQ|@{JQdoG0)> zcz`^Nf+JjDV{m||mx^AU(Wf7nr0)*5kKWDQr#63On};d4^}b=z_TtH*MpxjuMvhcL z4~s6y(qoO2tlxOk^>;5q%x-UR4Bc}N)|(TbhX$LH6PQ`eUXs8Zf7jJlClg$f%)c(f zD3cUR7sjlO?wDJS1Y|NKe_6Yy(UBw_uO-8A((>2)I5k~q??x9^`o{EiAkwqFJ9CvC zJIl6qk2Q3Jt6C$C_Q8k2>D!*+I!mUGzB_aGEnBJ{{-E7YH5yXzzCQ!I&NQ}S8p`d_ zTkddqn}PPC?}DB2#U)C*D2-0kJbUSGP++gM-W^~5hm}BnW=ZgH(q*(pi%S8 z3F>-W4KjQ3>3iP(JDwCYFw8E5CH20)YYQOLzLl7v%Nl`Boh|%jR}VFmzO$?F_TEE( z^yV82aT0A-QW-I4YH`sw^pX4Jwnx!CBC^J>6S*{6X*^MC95apXxA(kjHHNXW*YWa~ zKDM>a6>-q>EG*r(gI@+o7d3R?2V&y_o#?GnbmT2!|;`HaOrO#XHnv9LDmfANaPOScox%QR5_LY(U ztwtmI+Q3oOWU6j9m%VH+d(nDVZ*7Tnb3Rk2XRC~+KD+AMIkYWbj2V+@=))UUlJ@?EW2m>M&aGg}1@S@ffB5SYF z`H8L?j6vylHwMTg>g*xv2n|+jw7q3bX1>c;o=9kJWBMYCe!h3-ys2lh!PJ@a+*s1; zYOuv<-o^3>18^c6q8t_&zq9Yi?qi9NRg1HPp}SHSt7!4pG`k)#y)iQ~vU50ldNqE0 z-V{qqdy?K&*ld9UL0eF33)WbDHP!&Xa8wZ{o82WQXN{ARm7CH=rM0ut7An#^i#i-N z7N@>%XmD4w{iDYhmZ9kfR$sl@S=?fzXQAH4Hwpsn)h17!#aCnURCU;xl6TAIx#tPp zas?|4r28nGowuFAqBrfe9mZGD?w-{RY4%l2KFll{JswxCE%WUE^}qht+9Un`=MONT z=G=e$?_c&6_%O3qkHO_1|Iz=mv9VrF2N&Hj&kFTslu+2eWES6k@9m^CPU9{+ID8e^ zUV|-h-t}gtzqc2l2#8aCfe1n{oM@Im^#VMNi2KP z7R%H(*{WMyq%L(<(K8|Ps?r4+X~U#04q|sw7v{Gzk3)3bx(HcK13>=9Rq@*KO|aiP zyES#5rO=6$XLDkpzTH{bWUJK&>MdcwnB{!i5o2c{t0B&yU`K{ZFDrjSNgMdT<%n^r zmp?{bJfLH__fOmZ9Uc0a6jf-h`4aVqgOQKHSRol_Yyc~vNSGRd9nq7d7+U0Z2#(v)-7>4 zo1D83*tanQ5N0$?T>$O@Kt%$@ND+7r=G?F;!PTSlC;cslurvlq4<09H5HsdAJ1g|= zZ{wNN{s|*)3{rB+h0vPRjoxp@*K79%Fqc@2{G7gpDtR;WrN6 zbT}bz|wq6>G3}BMGA8>C+|;?M3M4IXCHJ(i4RQ zX+ctc25FL_LVfsQcT5QMFUc_=Rc)fWn*1E zJ{w)zijB{?dWOyM0Yj|Mni+>kH}#AflEa47AO~L4rt~09mK>NU*+*tg{bTynprwDr zH$F?3!sy}Y{8g4??(Ayv=s5FsH8475=ox8G4!0*qP3cJveR8ZLF{V#Wv_^;9+w!ty`z=&^bGPV46d~a{*?3}_0 
z4HYcizO`-N@Ps+pZ|d$dCFJ{Rhq-&u+B0ZO^matL^|6$>XRtNi^P(%>p6)mF51I!0 z9YX`*>9MK3&CQEr764Q;#xF8({ajQKDh)-De|~gvws&#Pjx)Bq?-`kBN%S^$&?=*6 zylkW1SJUd`z%a6=*@5vSKdYPU*fv3s)O9$a94LuZHyi6(&BQmlTKchgNUV^R~iQdSn)LMpv!ghQm^o&LY+e2Ag)2v{JCtQ&TVR zFqgF&IPNZIqycu&sdJ8M9-H#I$=x|%{sWj(k5Nu%hux8?My+eX*X51 z8X63iMx(V}Z!T+TFL?Q;@KqBp;}37?EMI$qXh$aY53$Q-RY=*M2lzs+D(m4t{D=SW zZ~yJT)t1EjbYFXzKmPXzas@Q77Z6haX<&b=v0f`p(oRftsDVA{>BQ!?Ej{p6pc_al zZ?+Y`GL^hA(xXM;oYhzL#>P)n+*Qpe-JDfz&dOFt4YXF9o7IFK``bX$lo=b`mP+FC zWp;A^)HgiE@?jBGy|xy8XDV;@0jvCTR<68Ny9<^PYfd6~!Ea-@(b##1pB?_{H?~`? z&R1Ono%5U1=fV?=9$r%P{L0mJSLdi}u~#)&I1?C39OD~wboM$KIF7M@O^b!!IjigQ zY_ZienGqs9wR@Z6J)zkJNCMuCNp#8O-Iw0P*Y+++(%4LzoN9mI7dzjy{FRZP#1?gI z5lt43$~Q*I+1jlQI`bW)37?ht^2+SV*}5|MmYI{<@NVU;{vp^eTwX<&R-T9A_e^|W zM=4+2*jZK)9mK^caAxx$!`&O`bP)q#er8uKxI9QL=oHComjEBVfUuPyqW zGh^(T8a%v0)zr1N**-Y>z!il3g~=#t@*yy%;tufN_4YJ4qSQwqU}SosDOzh!3O^44 zV2a#zry3Z>1I=UfcJxk$m$oM^vYFj;_ssSqSF)h7LyO4uV)ZUYy1TizGCo?C93&)3B zSLX*zhPXP%mk@U39sG!$H1Qg9qPU&>EhVnG!=udG_0aUZC|>A7=v_9EspLU@qJfW*yq{am4m?8 za&svC%;)dxKMszrD>s;#a+%be6n7W==y1ul9U-=W3 z?*7c;Dqoyi%r_l^(V3bfpy3nmncR=`cyy{94QKgsH7Edp@FB~W~0@HKGzTw{n!VoG*et_`$He+Ll`JTz~*y?_4 zb>1^r?&wUnJB*Dw<86cap4rAblURHojL;IowOT&C)V3SxX1tJoU!GO|`%nMrKmGfE z|L=Kp30|*5RrQ2_#QQ%Gq~~k?{a{aKuQkQ+{E(i`?d&I}7uu48kL&^Z$_SksTFk7w zM`rJRgX@9b#fsy1*5gBy=dRJk@zCmfVD>*SdOx>^^vMCxeQf85rm44Qa`Y~)JJQ1} zA|$Rfo`B+ajTHV}cXhIyS#GyFoD2%*0IF~4e6*6^HwAb%=(O*Bao5mX-^StW$;HC? z<@EE7eZ7P4`^Y34&h0kK-45FWgPpfxEq~_p zejDs0tsmUnWsK*R5{voopI;~S{SN3ykKoro4vv0CTAeeMka~cv8>Haw+;rhZJAL_h^H>Hggyg1NA|(# zC(tTQuA=WO)$ru)p_-1)DqUw~M+lV|p##;28dI2J1RV`97LA5@Lwlt1b)f7!SN-?S z_Rw%-W^3w9@(t9Kp6U6|+?~WX1>fq6zqi)3hi;prx2+M_)6!-u@-lLF_0vw-x3two zMnsNO^4ePQ#@f*4d>!qLt?W{50YrGu?zLn5e+bBU!?M@LlGjEGC@2bHYRfy|51h5w zu2_5w%HtRQ1WTB5gA)}(Wjp-0jWcnNO-~)2$<#DJ_DZ~KiAvS{0Rj8k+G8S=s;h|KHwsz*$jU|1V{M1(pt?uDA6EDFWbxOZC?9-&pCJI&3n5KcLnp4 z<<93Y%+8y+bLY&xr=N2dFUGM51h@=ZDcBX9&CZ~K%;v^Ww80?eiuzLkumm}PxF-On zNNm#N+`=h^Swb5F|EQ0}Aiprga0b9k8mas-ks)vk+(H2PQ&^7diQPs?_93kfcnQay zp%}o$